From 70b38ec5a9c8b88b4f56a9d8b904bcc82d6cd3ad Mon Sep 17 00:00:00 2001 From: takehirokaneko Date: Wed, 9 Jul 2014 11:11:35 +0900 Subject: [PATCH] add initial source code --- .coveragerc | 7 + .gitignore | 45 + .testr.conf | 8 + README.rst | 22 + etc/api-paste.ini | 33 + etc/rack.conf.sample | 23 + openstack-common.conf | 47 + rack/__init__.py | 22 + rack/api/__init__.py | 82 + rack/api/auth.py | 188 + rack/api/common.py | 448 ++ rack/api/v1/__init__.py | 186 + rack/api/v1/groups.py | 212 + rack/api/v1/keypairs.py | 235 ++ rack/api/v1/networks.py | 254 ++ rack/api/v1/processes.py | 312 ++ rack/api/v1/securitygroups.py | 321 ++ rack/api/v1/views/__init__.py | 0 rack/api/v1/views/groups.py | 49 + rack/api/v1/views/keypairs.py | 51 + rack/api/v1/views/networks.py | 47 + rack/api/v1/views/processes.py | 56 + rack/api/v1/views/securitygroups.py | 50 + rack/api/versions.py | 243 ++ rack/api/views/__init__.py | 0 rack/api/views/versions.py | 96 + rack/api/wsgi.py | 1302 ++++++ rack/api/xmlutil.py | 993 +++++ rack/baserpc.py | 81 + rack/cmd/__init__.py | 33 + rack/cmd/api.py | 36 + rack/cmd/resourceoperator.py | 42 + rack/cmd/scheduler.py | 37 + rack/config.py | 35 + rack/context.py | 227 + rack/db/__init__.py | 18 + rack/db/api.py | 179 + rack/db/base.py | 36 + rack/db/migration.py | 37 + rack/db/sqlalchemy/__init__.py | 21 + rack/db/sqlalchemy/api.py | 647 +++ rack/db/sqlalchemy/migrate_repo/README | 4 + rack/db/sqlalchemy/migrate_repo/__init__.py | 0 rack/db/sqlalchemy/migrate_repo/manage.py | 19 + rack/db/sqlalchemy/migrate_repo/migrate.cfg | 20 + .../versions/001_Add_groups_table.py | 56 + .../versions/002_Add_services_table.py | 67 + .../versions/003_Add_keypairs_table.py | 66 + .../versions/004_Add_securitygroups_table.py | 67 + .../versions/005_Add_networks_table.py | 66 + .../versions/006_Add_processes_table.py | 76 + .../007_Add_processes_securitygroups_table.py | 60 + .../008_Add_processes_networks_table.py | 60 + .../migrate_repo/versions/__init__.py | 0 rack/db/sqlalchemy/migration.py | 85 + rack/db/sqlalchemy/models.py | 184 + rack/db/sqlalchemy/types.py | 61 + rack/db/sqlalchemy/utils.py | 606 +++ rack/debugger.py | 75 + rack/exception.py | 1598 +++++++ rack/manager.py | 114 + rack/netconf.py | 58 + rack/object.py | 569 +++ rack/openstack/__init__.py | 0 rack/openstack/common/README | 13 + rack/openstack/common/__init__.py | 2 + rack/openstack/common/cliutils.py | 63 + rack/openstack/common/config/__init__.py | 0 rack/openstack/common/config/generator.py | 302 ++ rack/openstack/common/context.py | 83 + rack/openstack/common/db/__init__.py | 0 rack/openstack/common/db/api.py | 162 + rack/openstack/common/db/exception.py | 56 + rack/openstack/common/db/options.py | 168 + .../common/db/sqlalchemy/__init__.py | 0 .../common/db/sqlalchemy/migration.py | 268 ++ rack/openstack/common/db/sqlalchemy/models.py | 115 + .../common/db/sqlalchemy/provision.py | 187 + .../openstack/common/db/sqlalchemy/session.py | 860 ++++ .../common/db/sqlalchemy/test_base.py | 149 + .../common/db/sqlalchemy/test_migrations.py | 269 ++ rack/openstack/common/db/sqlalchemy/utils.py | 638 +++ rack/openstack/common/eventlet_backdoor.py | 146 + rack/openstack/common/excutils.py | 99 + rack/openstack/common/fileutils.py | 137 + rack/openstack/common/fixture/__init__.py | 0 rack/openstack/common/fixture/config.py | 85 + rack/openstack/common/fixture/lockutils.py | 51 + rack/openstack/common/fixture/logging.py | 34 + rack/openstack/common/fixture/mockpatch.py | 51 + rack/openstack/common/fixture/moxstubout.py | 32 + 
rack/openstack/common/gettextutils.py | 474 +++ rack/openstack/common/imageutils.py | 144 + rack/openstack/common/importutils.py | 66 + rack/openstack/common/jsonutils.py | 178 + rack/openstack/common/local.py | 45 + rack/openstack/common/lockutils.py | 303 ++ rack/openstack/common/log.py | 655 +++ rack/openstack/common/loopingcall.py | 147 + rack/openstack/common/memorycache.py | 97 + rack/openstack/common/middleware/__init__.py | 0 rack/openstack/common/middleware/base.py | 55 + .../openstack/common/middleware/request_id.py | 38 + rack/openstack/common/network_utils.py | 81 + rack/openstack/common/periodic_task.py | 190 + rack/openstack/common/policy.py | 779 ++++ rack/openstack/common/processutils.py | 266 ++ rack/openstack/common/report/__init__.py | 25 + .../common/report/generators/__init__.py | 21 + .../common/report/generators/conf.py | 44 + .../common/report/generators/threading.py | 73 + .../common/report/generators/version.py | 46 + .../common/report/guru_meditation_report.py | 186 + .../common/report/models/__init__.py | 20 + rack/openstack/common/report/models/base.py | 114 + rack/openstack/common/report/models/conf.py | 58 + .../common/report/models/threading.py | 100 + .../openstack/common/report/models/version.py | 44 + .../report/models/with_default_views.py | 81 + rack/openstack/common/report/report.py | 189 + rack/openstack/common/report/utils.py | 46 + .../openstack/common/report/views/__init__.py | 22 + .../common/report/views/jinja_view.py | 125 + .../common/report/views/json/__init__.py | 19 + .../common/report/views/json/generic.py | 65 + .../common/report/views/text/__init__.py | 19 + .../common/report/views/text/generic.py | 202 + .../common/report/views/text/header.py | 51 + .../common/report/views/text/threading.py | 80 + .../common/report/views/xml/__init__.py | 19 + .../common/report/views/xml/generic.py | 85 + rack/openstack/common/service.py | 491 +++ rack/openstack/common/sslutils.py | 98 + rack/openstack/common/strutils.py | 216 + rack/openstack/common/threadgroup.py | 121 + rack/openstack/common/timeutils.py | 210 + rack/openstack/common/units.py | 38 + rack/openstack/common/uuidutils.py | 37 + rack/openstack/common/versionutils.py | 45 + rack/openstack/common/xmlutils.py | 74 + rack/paths.py | 64 + rack/policy.py | 134 + rack/resourceoperator/__init__.py | 0 rack/resourceoperator/manager.py | 160 + rack/resourceoperator/openstack/__init__.py | 65 + rack/resourceoperator/openstack/keypairs.py | 47 + rack/resourceoperator/openstack/networks.py | 68 + rack/resourceoperator/openstack/processes.py | 64 + .../openstack/securitygroups.py | 73 + rack/resourceoperator/rpcapi.py | 107 + rack/rpc.py | 144 + rack/safe_utils.py | 50 + rack/scheduler/__init__.py | 25 + rack/scheduler/chance.py | 65 + rack/scheduler/driver.py | 69 + rack/scheduler/manager.py | 69 + rack/scheduler/rpcapi.py | 63 + rack/scheduler/utils.py | 169 + rack/service.py | 373 ++ rack/servicegroup/__init__.py | 22 + rack/servicegroup/api.py | 167 + rack/servicegroup/drivers/__init__.py | 0 rack/servicegroup/drivers/db.py | 100 + rack/servicegroup/drivers/mc.py | 99 + rack/servicegroup/drivers/zk.py | 155 + rack/test.py | 285 ++ rack/tests/__init__.py | 27 + rack/tests/api/__init__.py | 0 rack/tests/api/fakes.py | 42 + rack/tests/api/v1/__init__.py | 0 rack/tests/api/v1/test_groups.py | 597 +++ rack/tests/api/v1/test_keypairs.py | 722 ++++ rack/tests/api/v1/test_networks.py | 762 ++++ rack/tests/api/v1/test_processes.py | 747 ++++ rack/tests/api/v1/test_securitygroups.py | 982 +++++ 
rack/tests/conf_fixture.py | 41 + rack/tests/db/__init__.py | 0 rack/tests/db/test_db_api.py | 1056 +++++ rack/tests/db/test_migrations.conf | 26 + rack/tests/db/test_migrations.py | 605 +++ rack/tests/fake_policy.py | 377 ++ rack/tests/policy_fixture.py | 73 + rack/tests/resourceoperator/__init__.py | 0 .../resourceoperator/openstack/__init__.py | 0 .../openstack/test_keypairs.py | 85 + .../openstack/test_networks.py | 109 + .../openstack/test_processes.py | 125 + .../openstack/test_securitygroups.py | 164 + rack/tests/resourceoperator/test_manager.py | 391 ++ rack/tests/resourceoperator/test_rpcapi.py | 97 + rack/tests/servicegroup/__init__.py | 0 .../servicegroup/test_db_servicegroup.py | 144 + .../servicegroup/test_mc_servicegroup.py | 209 + rack/tests/servicegroup/test_zk_driver.py | 63 + rack/tests/test_service.py | 329 ++ rack/tests/utils.py | 86 + rack/utils.py | 1165 ++++++ rack/version.py | 91 + rack/wsgi.py | 502 +++ requirements.txt | 36 + setup.cfg | 29 + setup.py | 18 + test-requirements.txt | 15 + tools/ansible-openstack/README.md | 67 + tools/ansible-openstack/ansible.cfg | 30 + tools/ansible-openstack/group_vars/all | 126 + .../ansible-openstack/group_vars/amqp_backend | 11 + .../group_vars/compute_backend | 11 + tools/ansible-openstack/group_vars/controller | 11 + tools/ansible-openstack/group_vars/frontend | 11 + .../group_vars/network_gateway | 11 + .../ansible-openstack/group_vars/sql_backend | 11 + .../group_vars/volume_backend | 11 + .../openstack-ansible-modules/README.md | 84 + .../openstack-ansible-modules/cinder_manage | 102 + .../openstack-ansible-modules/glance | 164 + .../openstack-ansible-modules/glance_manage | 124 + .../openstack-ansible-modules/keystone_manage | 111 + .../keystone_service | 302 ++ .../neutron_floating_ip | 242 ++ .../openstack-ansible-modules/neutron_network | 282 ++ .../openstack-ansible-modules/neutron_router | 210 + .../neutron_router_gateway | 215 + .../neutron_router_interface | 249 ++ .../openstack-ansible-modules/neutron_subnet | 294 ++ .../openstack-ansible-modules/nova_manage | 93 + tools/ansible-openstack/openstack_hosts | 24 + .../playbooks/cinder/controller.yml | 42 + .../playbooks/cinder/frontend.yml | 46 + .../playbooks/cinder/keystone.yml | 47 + .../playbooks/cinder/library | 1 + .../playbooks/cinder/main.yml | 5 + .../playbooks/cinder/mysql.yml | 20 + .../playbooks/cinder/templates | 1 + .../playbooks/cinder/volume.yml | 61 + .../playbooks/common/common.yml | 71 + .../common/common_kernelParameters.yml | 45 + .../common/common_sysconfig_VLAN.yml | 15 + .../playbooks/common/main.yml | 7 + .../playbooks/common/templates | 1 + .../playbooks/common/yum_KVM.yml | 37 + .../playbooks/common/yum_iproute.yml | 13 + .../playbooks/common/yum_kernel.yml | 18 + .../playbooks/glance/controller.yml | 42 + .../playbooks/glance/fixture.yml | 19 + .../playbooks/glance/frontend.yml | 54 + .../playbooks/glance/keystone.yml | 35 + .../playbooks/glance/library | 1 + .../playbooks/glance/main.yml | 6 + .../playbooks/glance/mysql.yml | 21 + .../playbooks/glance/templates | 1 + .../playbooks/horizon/main.yml | 55 + .../playbooks/horizon/templates | 1 + .../playbooks/keystone/fixture.yml | 64 + .../playbooks/keystone/frontend.yml | 89 + .../playbooks/keystone/library | 1 + .../playbooks/keystone/main.yml | 4 + .../playbooks/keystone/mysql.yml | 26 + .../playbooks/keystone/templates | 1 + .../playbooks/memcached/main.yml | 33 + .../playbooks/memcached/templates | 1 + .../playbooks/mysql/main.yml | 83 + .../playbooks/mysql/templates | 1 + 
.../playbooks/neutron/compute.yml | 75 + .../playbooks/neutron/frontend.yml | 67 + .../playbooks/neutron/gateway.yml | 113 + .../playbooks/neutron/keystone.yml | 35 + .../playbooks/neutron/library | 1 + .../playbooks/neutron/main.yml | 8 + .../playbooks/neutron/mysql.yml | 21 + .../playbooks/neutron/templates | 1 + .../playbooks/nova/compute.yml | 59 + .../playbooks/nova/controller.yml | 60 + .../playbooks/nova/frontend.yml | 59 + .../playbooks/nova/keystone.yml | 51 + .../ansible-openstack/playbooks/nova/library | 1 + .../ansible-openstack/playbooks/nova/main.yml | 5 + .../playbooks/nova/mysql.yml | 21 + .../playbooks/nova/templates | 1 + .../ansible-openstack/playbooks/ntp/main.yml | 34 + .../ansible-openstack/playbooks/ntp/templates | 1 + .../playbooks/post_action/compute.yml | 16 + .../playbooks/post_action/controller.yml | 25 + .../playbooks/post_action/frontend.yml | 20 + .../playbooks/post_action/gateway.yml | 20 + .../playbooks/post_action/library | 1 + .../playbooks/post_action/main.yml | 10 + .../post_action/post-prepareProviderNW.yml | 40 + .../playbooks/post_action/rabbitmq.yml | 15 + .../playbooks/pre_action/main.yml | 3 + .../playbooks/pre_action/setup.yml | 46 + .../playbooks/rabbitmq/main.yml | 71 + .../playbooks/rabbitmq/templates | 1 + .../reusables/delete_firewall_rule.yml | 6 + .../playbooks/reusables/handlers.yml | 7 + .../reusables/open_firewall_port.yml | 9 + tools/ansible-openstack/set_openstack.yml | 13 + .../templates/etc/cinder/cinder.conf | 2163 ++++++++++ .../templates/etc/glance/glance-api.conf | 674 +++ .../templates/etc/glance/glance-registry.conf | 198 + .../etc/init.d/neutron-linuxbridge-agent | 100 + .../templates/etc/keystone/keystone.conf | 1338 ++++++ tools/ansible-openstack/templates/etc/my.cnf | 46 + .../templates/etc/neutron/dhcp_agent.ini | 90 + .../templates/etc/neutron/l3_agent.ini | 80 + .../templates/etc/neutron/lbaas_agent.ini | 41 + .../templates/etc/neutron/metadata_agent.ini | 38 + .../templates/etc/neutron/neutron.conf | 472 +++ .../etc/neutron/ovs_neutron_plugin.ini | 172 + .../plugins/ml2/ml2_conf_linuxbridge_VLAN.ini | 76 + .../templates/etc/nova/nova.conf | 3706 +++++++++++++++++ .../ansible-openstack/templates/etc/ntp.conf | 8 + .../etc/openstack-dashboard/local_settings | 490 +++ .../templates/etc/rabbitmq/rabbitmq-env.conf | 2 + .../templates/etc/rabbitmq/rabbitmq.config | 3 + .../templates/etc/sysconfig/bridge.modules | 4 + .../templates/etc/sysconfig/memcached | 5 + .../etc/yum.repos.d/CentOS-Base.repo | 46 + .../etc/yum.repos.d/epel-testing.repo | 28 + .../templates/etc/yum.repos.d/epel.repo | 27 + .../etc/yum.repos.d/rdo-release.repo | 10 + .../ansible-openstack/templates/root/.my.cnf | 5 + .../templates/root/keystonerc_admin | 7 + tools/ansible-openstack/templates/root/openrc | 12 + tools/rack_client.py | 436 ++ tools/simple_integration_test.py | 599 +++ tox.ini | 30 + 327 files changed, 49162 insertions(+) create mode 100644 .coveragerc create mode 100644 .gitignore create mode 100644 .testr.conf create mode 100644 README.rst create mode 100644 etc/api-paste.ini create mode 100644 etc/rack.conf.sample create mode 100644 openstack-common.conf create mode 100644 rack/__init__.py create mode 100644 rack/api/__init__.py create mode 100644 rack/api/auth.py create mode 100644 rack/api/common.py create mode 100644 rack/api/v1/__init__.py create mode 100644 rack/api/v1/groups.py create mode 100644 rack/api/v1/keypairs.py create mode 100644 rack/api/v1/networks.py create mode 100644 rack/api/v1/processes.py create mode 100644 
rack/api/v1/securitygroups.py create mode 100644 rack/api/v1/views/__init__.py create mode 100644 rack/api/v1/views/groups.py create mode 100644 rack/api/v1/views/keypairs.py create mode 100644 rack/api/v1/views/networks.py create mode 100644 rack/api/v1/views/processes.py create mode 100644 rack/api/v1/views/securitygroups.py create mode 100644 rack/api/versions.py create mode 100644 rack/api/views/__init__.py create mode 100644 rack/api/views/versions.py create mode 100644 rack/api/wsgi.py create mode 100644 rack/api/xmlutil.py create mode 100644 rack/baserpc.py create mode 100644 rack/cmd/__init__.py create mode 100644 rack/cmd/api.py create mode 100644 rack/cmd/resourceoperator.py create mode 100644 rack/cmd/scheduler.py create mode 100644 rack/config.py create mode 100644 rack/context.py create mode 100644 rack/db/__init__.py create mode 100644 rack/db/api.py create mode 100644 rack/db/base.py create mode 100644 rack/db/migration.py create mode 100644 rack/db/sqlalchemy/__init__.py create mode 100644 rack/db/sqlalchemy/api.py create mode 100644 rack/db/sqlalchemy/migrate_repo/README create mode 100644 rack/db/sqlalchemy/migrate_repo/__init__.py create mode 100644 rack/db/sqlalchemy/migrate_repo/manage.py create mode 100644 rack/db/sqlalchemy/migrate_repo/migrate.cfg create mode 100644 rack/db/sqlalchemy/migrate_repo/versions/001_Add_groups_table.py create mode 100644 rack/db/sqlalchemy/migrate_repo/versions/002_Add_services_table.py create mode 100644 rack/db/sqlalchemy/migrate_repo/versions/003_Add_keypairs_table.py create mode 100644 rack/db/sqlalchemy/migrate_repo/versions/004_Add_securitygroups_table.py create mode 100644 rack/db/sqlalchemy/migrate_repo/versions/005_Add_networks_table.py create mode 100644 rack/db/sqlalchemy/migrate_repo/versions/006_Add_processes_table.py create mode 100644 rack/db/sqlalchemy/migrate_repo/versions/007_Add_processes_securitygroups_table.py create mode 100644 rack/db/sqlalchemy/migrate_repo/versions/008_Add_processes_networks_table.py create mode 100644 rack/db/sqlalchemy/migrate_repo/versions/__init__.py create mode 100644 rack/db/sqlalchemy/migration.py create mode 100644 rack/db/sqlalchemy/models.py create mode 100644 rack/db/sqlalchemy/types.py create mode 100644 rack/db/sqlalchemy/utils.py create mode 100644 rack/debugger.py create mode 100644 rack/exception.py create mode 100644 rack/manager.py create mode 100644 rack/netconf.py create mode 100644 rack/object.py create mode 100644 rack/openstack/__init__.py create mode 100644 rack/openstack/common/README create mode 100644 rack/openstack/common/__init__.py create mode 100644 rack/openstack/common/cliutils.py create mode 100644 rack/openstack/common/config/__init__.py create mode 100644 rack/openstack/common/config/generator.py create mode 100644 rack/openstack/common/context.py create mode 100644 rack/openstack/common/db/__init__.py create mode 100644 rack/openstack/common/db/api.py create mode 100644 rack/openstack/common/db/exception.py create mode 100644 rack/openstack/common/db/options.py create mode 100644 rack/openstack/common/db/sqlalchemy/__init__.py create mode 100644 rack/openstack/common/db/sqlalchemy/migration.py create mode 100644 rack/openstack/common/db/sqlalchemy/models.py create mode 100644 rack/openstack/common/db/sqlalchemy/provision.py create mode 100644 rack/openstack/common/db/sqlalchemy/session.py create mode 100644 rack/openstack/common/db/sqlalchemy/test_base.py create mode 100644 rack/openstack/common/db/sqlalchemy/test_migrations.py create mode 100644 
rack/openstack/common/db/sqlalchemy/utils.py create mode 100644 rack/openstack/common/eventlet_backdoor.py create mode 100644 rack/openstack/common/excutils.py create mode 100644 rack/openstack/common/fileutils.py create mode 100644 rack/openstack/common/fixture/__init__.py create mode 100644 rack/openstack/common/fixture/config.py create mode 100644 rack/openstack/common/fixture/lockutils.py create mode 100644 rack/openstack/common/fixture/logging.py create mode 100644 rack/openstack/common/fixture/mockpatch.py create mode 100644 rack/openstack/common/fixture/moxstubout.py create mode 100644 rack/openstack/common/gettextutils.py create mode 100644 rack/openstack/common/imageutils.py create mode 100644 rack/openstack/common/importutils.py create mode 100644 rack/openstack/common/jsonutils.py create mode 100644 rack/openstack/common/local.py create mode 100644 rack/openstack/common/lockutils.py create mode 100644 rack/openstack/common/log.py create mode 100644 rack/openstack/common/loopingcall.py create mode 100644 rack/openstack/common/memorycache.py create mode 100644 rack/openstack/common/middleware/__init__.py create mode 100644 rack/openstack/common/middleware/base.py create mode 100644 rack/openstack/common/middleware/request_id.py create mode 100644 rack/openstack/common/network_utils.py create mode 100644 rack/openstack/common/periodic_task.py create mode 100644 rack/openstack/common/policy.py create mode 100644 rack/openstack/common/processutils.py create mode 100644 rack/openstack/common/report/__init__.py create mode 100644 rack/openstack/common/report/generators/__init__.py create mode 100644 rack/openstack/common/report/generators/conf.py create mode 100644 rack/openstack/common/report/generators/threading.py create mode 100644 rack/openstack/common/report/generators/version.py create mode 100644 rack/openstack/common/report/guru_meditation_report.py create mode 100644 rack/openstack/common/report/models/__init__.py create mode 100644 rack/openstack/common/report/models/base.py create mode 100644 rack/openstack/common/report/models/conf.py create mode 100644 rack/openstack/common/report/models/threading.py create mode 100644 rack/openstack/common/report/models/version.py create mode 100644 rack/openstack/common/report/models/with_default_views.py create mode 100644 rack/openstack/common/report/report.py create mode 100644 rack/openstack/common/report/utils.py create mode 100644 rack/openstack/common/report/views/__init__.py create mode 100644 rack/openstack/common/report/views/jinja_view.py create mode 100644 rack/openstack/common/report/views/json/__init__.py create mode 100644 rack/openstack/common/report/views/json/generic.py create mode 100644 rack/openstack/common/report/views/text/__init__.py create mode 100644 rack/openstack/common/report/views/text/generic.py create mode 100644 rack/openstack/common/report/views/text/header.py create mode 100644 rack/openstack/common/report/views/text/threading.py create mode 100644 rack/openstack/common/report/views/xml/__init__.py create mode 100644 rack/openstack/common/report/views/xml/generic.py create mode 100644 rack/openstack/common/service.py create mode 100644 rack/openstack/common/sslutils.py create mode 100644 rack/openstack/common/strutils.py create mode 100644 rack/openstack/common/threadgroup.py create mode 100644 rack/openstack/common/timeutils.py create mode 100644 rack/openstack/common/units.py create mode 100644 rack/openstack/common/uuidutils.py create mode 100644 rack/openstack/common/versionutils.py create mode 
100644 rack/openstack/common/xmlutils.py create mode 100644 rack/paths.py create mode 100644 rack/policy.py create mode 100644 rack/resourceoperator/__init__.py create mode 100644 rack/resourceoperator/manager.py create mode 100644 rack/resourceoperator/openstack/__init__.py create mode 100644 rack/resourceoperator/openstack/keypairs.py create mode 100644 rack/resourceoperator/openstack/networks.py create mode 100644 rack/resourceoperator/openstack/processes.py create mode 100644 rack/resourceoperator/openstack/securitygroups.py create mode 100644 rack/resourceoperator/rpcapi.py create mode 100644 rack/rpc.py create mode 100644 rack/safe_utils.py create mode 100644 rack/scheduler/__init__.py create mode 100644 rack/scheduler/chance.py create mode 100644 rack/scheduler/driver.py create mode 100644 rack/scheduler/manager.py create mode 100644 rack/scheduler/rpcapi.py create mode 100644 rack/scheduler/utils.py create mode 100644 rack/service.py create mode 100644 rack/servicegroup/__init__.py create mode 100644 rack/servicegroup/api.py create mode 100644 rack/servicegroup/drivers/__init__.py create mode 100644 rack/servicegroup/drivers/db.py create mode 100644 rack/servicegroup/drivers/mc.py create mode 100644 rack/servicegroup/drivers/zk.py create mode 100644 rack/test.py create mode 100644 rack/tests/__init__.py create mode 100644 rack/tests/api/__init__.py create mode 100644 rack/tests/api/fakes.py create mode 100644 rack/tests/api/v1/__init__.py create mode 100644 rack/tests/api/v1/test_groups.py create mode 100644 rack/tests/api/v1/test_keypairs.py create mode 100644 rack/tests/api/v1/test_networks.py create mode 100644 rack/tests/api/v1/test_processes.py create mode 100644 rack/tests/api/v1/test_securitygroups.py create mode 100644 rack/tests/conf_fixture.py create mode 100644 rack/tests/db/__init__.py create mode 100644 rack/tests/db/test_db_api.py create mode 100644 rack/tests/db/test_migrations.conf create mode 100644 rack/tests/db/test_migrations.py create mode 100644 rack/tests/fake_policy.py create mode 100644 rack/tests/policy_fixture.py create mode 100644 rack/tests/resourceoperator/__init__.py create mode 100644 rack/tests/resourceoperator/openstack/__init__.py create mode 100644 rack/tests/resourceoperator/openstack/test_keypairs.py create mode 100644 rack/tests/resourceoperator/openstack/test_networks.py create mode 100644 rack/tests/resourceoperator/openstack/test_processes.py create mode 100644 rack/tests/resourceoperator/openstack/test_securitygroups.py create mode 100644 rack/tests/resourceoperator/test_manager.py create mode 100644 rack/tests/resourceoperator/test_rpcapi.py create mode 100644 rack/tests/servicegroup/__init__.py create mode 100644 rack/tests/servicegroup/test_db_servicegroup.py create mode 100644 rack/tests/servicegroup/test_mc_servicegroup.py create mode 100644 rack/tests/servicegroup/test_zk_driver.py create mode 100644 rack/tests/test_service.py create mode 100644 rack/tests/utils.py create mode 100644 rack/utils.py create mode 100644 rack/version.py create mode 100644 rack/wsgi.py create mode 100644 requirements.txt create mode 100644 setup.cfg create mode 100644 setup.py create mode 100644 test-requirements.txt create mode 100644 tools/ansible-openstack/README.md create mode 100644 tools/ansible-openstack/ansible.cfg create mode 100644 tools/ansible-openstack/group_vars/all create mode 100644 tools/ansible-openstack/group_vars/amqp_backend create mode 100644 tools/ansible-openstack/group_vars/compute_backend create mode 100644 
tools/ansible-openstack/group_vars/controller create mode 100644 tools/ansible-openstack/group_vars/frontend create mode 100644 tools/ansible-openstack/group_vars/network_gateway create mode 100644 tools/ansible-openstack/group_vars/sql_backend create mode 100644 tools/ansible-openstack/group_vars/volume_backend create mode 100644 tools/ansible-openstack/openstack-ansible-modules/README.md create mode 100644 tools/ansible-openstack/openstack-ansible-modules/cinder_manage create mode 100644 tools/ansible-openstack/openstack-ansible-modules/glance create mode 100644 tools/ansible-openstack/openstack-ansible-modules/glance_manage create mode 100644 tools/ansible-openstack/openstack-ansible-modules/keystone_manage create mode 100644 tools/ansible-openstack/openstack-ansible-modules/keystone_service create mode 100644 tools/ansible-openstack/openstack-ansible-modules/neutron_floating_ip create mode 100644 tools/ansible-openstack/openstack-ansible-modules/neutron_network create mode 100644 tools/ansible-openstack/openstack-ansible-modules/neutron_router create mode 100644 tools/ansible-openstack/openstack-ansible-modules/neutron_router_gateway create mode 100644 tools/ansible-openstack/openstack-ansible-modules/neutron_router_interface create mode 100644 tools/ansible-openstack/openstack-ansible-modules/neutron_subnet create mode 100644 tools/ansible-openstack/openstack-ansible-modules/nova_manage create mode 100644 tools/ansible-openstack/openstack_hosts create mode 100644 tools/ansible-openstack/playbooks/cinder/controller.yml create mode 100644 tools/ansible-openstack/playbooks/cinder/frontend.yml create mode 100644 tools/ansible-openstack/playbooks/cinder/keystone.yml create mode 100644 tools/ansible-openstack/playbooks/cinder/library create mode 100644 tools/ansible-openstack/playbooks/cinder/main.yml create mode 100644 tools/ansible-openstack/playbooks/cinder/mysql.yml create mode 100644 tools/ansible-openstack/playbooks/cinder/templates create mode 100644 tools/ansible-openstack/playbooks/cinder/volume.yml create mode 100644 tools/ansible-openstack/playbooks/common/common.yml create mode 100644 tools/ansible-openstack/playbooks/common/common_kernelParameters.yml create mode 100644 tools/ansible-openstack/playbooks/common/common_sysconfig_VLAN.yml create mode 100644 tools/ansible-openstack/playbooks/common/main.yml create mode 100644 tools/ansible-openstack/playbooks/common/templates create mode 100644 tools/ansible-openstack/playbooks/common/yum_KVM.yml create mode 100644 tools/ansible-openstack/playbooks/common/yum_iproute.yml create mode 100644 tools/ansible-openstack/playbooks/common/yum_kernel.yml create mode 100644 tools/ansible-openstack/playbooks/glance/controller.yml create mode 100644 tools/ansible-openstack/playbooks/glance/fixture.yml create mode 100644 tools/ansible-openstack/playbooks/glance/frontend.yml create mode 100644 tools/ansible-openstack/playbooks/glance/keystone.yml create mode 100644 tools/ansible-openstack/playbooks/glance/library create mode 100644 tools/ansible-openstack/playbooks/glance/main.yml create mode 100644 tools/ansible-openstack/playbooks/glance/mysql.yml create mode 100644 tools/ansible-openstack/playbooks/glance/templates create mode 100644 tools/ansible-openstack/playbooks/horizon/main.yml create mode 100644 tools/ansible-openstack/playbooks/horizon/templates create mode 100644 tools/ansible-openstack/playbooks/keystone/fixture.yml create mode 100644 tools/ansible-openstack/playbooks/keystone/frontend.yml create mode 100644 
tools/ansible-openstack/playbooks/keystone/library create mode 100644 tools/ansible-openstack/playbooks/keystone/main.yml create mode 100644 tools/ansible-openstack/playbooks/keystone/mysql.yml create mode 100644 tools/ansible-openstack/playbooks/keystone/templates create mode 100644 tools/ansible-openstack/playbooks/memcached/main.yml create mode 100644 tools/ansible-openstack/playbooks/memcached/templates create mode 100644 tools/ansible-openstack/playbooks/mysql/main.yml create mode 100644 tools/ansible-openstack/playbooks/mysql/templates create mode 100644 tools/ansible-openstack/playbooks/neutron/compute.yml create mode 100644 tools/ansible-openstack/playbooks/neutron/frontend.yml create mode 100644 tools/ansible-openstack/playbooks/neutron/gateway.yml create mode 100644 tools/ansible-openstack/playbooks/neutron/keystone.yml create mode 100644 tools/ansible-openstack/playbooks/neutron/library create mode 100644 tools/ansible-openstack/playbooks/neutron/main.yml create mode 100644 tools/ansible-openstack/playbooks/neutron/mysql.yml create mode 100644 tools/ansible-openstack/playbooks/neutron/templates create mode 100644 tools/ansible-openstack/playbooks/nova/compute.yml create mode 100644 tools/ansible-openstack/playbooks/nova/controller.yml create mode 100644 tools/ansible-openstack/playbooks/nova/frontend.yml create mode 100644 tools/ansible-openstack/playbooks/nova/keystone.yml create mode 100644 tools/ansible-openstack/playbooks/nova/library create mode 100644 tools/ansible-openstack/playbooks/nova/main.yml create mode 100644 tools/ansible-openstack/playbooks/nova/mysql.yml create mode 100644 tools/ansible-openstack/playbooks/nova/templates create mode 100644 tools/ansible-openstack/playbooks/ntp/main.yml create mode 100644 tools/ansible-openstack/playbooks/ntp/templates create mode 100644 tools/ansible-openstack/playbooks/post_action/compute.yml create mode 100644 tools/ansible-openstack/playbooks/post_action/controller.yml create mode 100644 tools/ansible-openstack/playbooks/post_action/frontend.yml create mode 100644 tools/ansible-openstack/playbooks/post_action/gateway.yml create mode 100644 tools/ansible-openstack/playbooks/post_action/library create mode 100644 tools/ansible-openstack/playbooks/post_action/main.yml create mode 100644 tools/ansible-openstack/playbooks/post_action/post-prepareProviderNW.yml create mode 100644 tools/ansible-openstack/playbooks/post_action/rabbitmq.yml create mode 100644 tools/ansible-openstack/playbooks/pre_action/main.yml create mode 100644 tools/ansible-openstack/playbooks/pre_action/setup.yml create mode 100644 tools/ansible-openstack/playbooks/rabbitmq/main.yml create mode 100644 tools/ansible-openstack/playbooks/rabbitmq/templates create mode 100644 tools/ansible-openstack/playbooks/reusables/delete_firewall_rule.yml create mode 100644 tools/ansible-openstack/playbooks/reusables/handlers.yml create mode 100644 tools/ansible-openstack/playbooks/reusables/open_firewall_port.yml create mode 100644 tools/ansible-openstack/set_openstack.yml create mode 100644 tools/ansible-openstack/templates/etc/cinder/cinder.conf create mode 100644 tools/ansible-openstack/templates/etc/glance/glance-api.conf create mode 100644 tools/ansible-openstack/templates/etc/glance/glance-registry.conf create mode 100644 tools/ansible-openstack/templates/etc/init.d/neutron-linuxbridge-agent create mode 100644 tools/ansible-openstack/templates/etc/keystone/keystone.conf create mode 100644 tools/ansible-openstack/templates/etc/my.cnf create mode 100644 
tools/ansible-openstack/templates/etc/neutron/dhcp_agent.ini create mode 100644 tools/ansible-openstack/templates/etc/neutron/l3_agent.ini create mode 100644 tools/ansible-openstack/templates/etc/neutron/lbaas_agent.ini create mode 100644 tools/ansible-openstack/templates/etc/neutron/metadata_agent.ini create mode 100644 tools/ansible-openstack/templates/etc/neutron/neutron.conf create mode 100644 tools/ansible-openstack/templates/etc/neutron/ovs_neutron_plugin.ini create mode 100644 tools/ansible-openstack/templates/etc/neutron/plugins/ml2/ml2_conf_linuxbridge_VLAN.ini create mode 100644 tools/ansible-openstack/templates/etc/nova/nova.conf create mode 100644 tools/ansible-openstack/templates/etc/ntp.conf create mode 100644 tools/ansible-openstack/templates/etc/openstack-dashboard/local_settings create mode 100644 tools/ansible-openstack/templates/etc/rabbitmq/rabbitmq-env.conf create mode 100644 tools/ansible-openstack/templates/etc/rabbitmq/rabbitmq.config create mode 100644 tools/ansible-openstack/templates/etc/sysconfig/bridge.modules create mode 100644 tools/ansible-openstack/templates/etc/sysconfig/memcached create mode 100644 tools/ansible-openstack/templates/etc/yum.repos.d/CentOS-Base.repo create mode 100644 tools/ansible-openstack/templates/etc/yum.repos.d/epel-testing.repo create mode 100644 tools/ansible-openstack/templates/etc/yum.repos.d/epel.repo create mode 100644 tools/ansible-openstack/templates/etc/yum.repos.d/rdo-release.repo create mode 100644 tools/ansible-openstack/templates/root/.my.cnf create mode 100644 tools/ansible-openstack/templates/root/keystonerc_admin create mode 100644 tools/ansible-openstack/templates/root/openrc create mode 100755 tools/rack_client.py create mode 100644 tools/simple_integration_test.py create mode 100644 tox.ini diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 0000000..3f5dd14 --- /dev/null +++ b/.coveragerc @@ -0,0 +1,7 @@ +[run] +branch = True +source = rack +omit = rack/tests/*,rack/openstack/* + +[report] +ignore-errors = True diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..0d12d80 --- /dev/null +++ b/.gitignore @@ -0,0 +1,45 @@ +*.DS_Store +*.egg* +*.log +*.mo +*.pyc +*.swo +*.swp +*.sqlite +*~ +.autogenerated +.coverage +.rack-venv +.project +.pydevproject +.ropeproject +.testrepository/ +.settings +.tox +.idea +.venv +AUTHORS +Authors +build-stamp +build/* +bin/* +CA/ +ChangeLog +coverage.xml +cover/* +covhtml +dist/* +doc/source/api/* +doc/build/* +etc/rack.conf +instances +keeper +keys +local_settings.py +MANIFEST +nosetests.xml +rack/tests/cover/* +rack/vcsversion.py +tools/conf/rack.conf* +tools/lintstack.head.py +tools/pylint_exceptions diff --git a/.testr.conf b/.testr.conf new file mode 100644 index 0000000..6982d69 --- /dev/null +++ b/.testr.conf @@ -0,0 +1,8 @@ +[DEFAULT] +test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ + OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ + OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-160} \ + ${PYTHON:-python} -m subunit.run discover -t ./ ./rack/tests $LISTOPT $IDOPTION + +test_id_option=--load-list $IDFILE +test_list_option=--list diff --git a/README.rst b/README.rst new file mode 100644 index 0000000..dc6f1d8 --- /dev/null +++ b/README.rst @@ -0,0 +1,22 @@ +RACK (Real Application Centric Kernel) README +============================================== + +RACK gives an application the ability to control OpenStack as a program resource. +Through RACK, a VM instance looks to an application like a Linux process, so you can issue "Exec", "Fork", and "Kill" commands against those processes (which are actually VMs). +This enables you to implement large-scale distributed systems on OpenStack in a variety of programming languages. + +RACK can be used in many situations. +The following are some examples. + + * You can implement applications with a new architecture. + For example, you can build an application that calculates the amount of computing resources (i.e. instances) needed for the data to be processed and launches additional instances dynamically. + The data is then processed very quickly because these instances work in parallel. + This kind of application is well suited to processing large amounts of data. + + * You can use RACK to integrate existing systems such as Hadoop-based batch systems and Web applications. + For example, RACK lets you deploy a Hadoop cluster easily and add an autoscaling function to your Web applications. + +To learn about RACK in detail, read this page on the wiki: + + https://wiki.openstack.org/wiki/RACK +
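The README above describes VMs that behave like processes and are driven through a REST API. The sketch below is purely illustrative and is not part of this patch: it shows roughly what a client call against the v1 API added by this change could look like when the service runs with auth_strategy = noauth (the NoAuthMiddleware added in rack/api/auth.py accepts a fake "user:project" style X-Auth-Token). The host and port, the nesting of processes under groups, and the request/response field names (gid, name, nova_flavor_id, glance_image_id) are assumptions, not guaranteed by this diff; the authoritative routes and schemas are the ones defined under rack/api/v1/ later in this patch.

    import json

    import requests  # third-party HTTP client, used only for this illustration

    RACK_API = "http://127.0.0.1:8088/v1"  # endpoint host and port are assumed
    HEADERS = {
        "Content-Type": "application/json",
        # With auth_strategy = noauth the middleware accepts a fake
        # "user:project" token (see NoAuthMiddlewareBase in rack/api/auth.py).
        "X-Auth-Token": "admin:admin",
    }

    # Create a group, which scopes keypairs, networks and processes.
    resp = requests.post(RACK_API + "/groups", headers=HEADERS,
                         data=json.dumps({"group": {"name": "example"}}))
    gid = resp.json()["group"]["gid"]  # response field names are assumptions

    # Boot a process (actually a VM), roughly the "Exec" operation the
    # README describes. Flavor and image identifiers are placeholders.
    resp = requests.post(RACK_API + "/groups/%s/processes" % gid,
                         headers=HEADERS,
                         data=json.dumps({"process": {
                             "nova_flavor_id": "2",
                             "glance_image_id": "REPLACE-WITH-IMAGE-UUID"}}))
    print(resp.json())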
diff --git a/etc/api-paste.ini b/etc/api-paste.ini new file mode 100644 index 0000000..4132e9d --- /dev/null +++ b/etc/api-paste.ini @@ -0,0 +1,33 @@ +[composite:rackapi] +use = egg:Paste#urlmap +/ = rackversions +/v1 = rackapi_v1 + +[composite:rackapi_v1] +use = call:rack.api.auth:pipeline_factory +noauth = faultwrap noauth rackapp_v1 +keystone = faultwrap authtoken keystonecontext rackapp_v1 + +[filter:faultwrap] +paste.filter_factory = rack.api:FaultWrapper.factory + +[filter:noauth] +paste.filter_factory = rack.api.auth:NoAuthMiddleware.factory + +[pipeline:rackversions] +pipeline = faultwrap rackversionapp + +[app:rackversionapp] +paste.app_factory = rack.api.versions:Versions.factory + +[app:rackapp_v1] +paste.app_factory = rack.api.v1:APIRouter.factory + +[filter:keystonecontext] +paste.filter_factory = rack.api.auth:RackKeystoneContext.factory + +[filter:authtoken] +paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory +auth_port = 35357 +auth_protocol = http +auth_version = v2.0 \ No newline at end of file diff --git a/etc/rack.conf.sample b/etc/rack.conf.sample new file mode 100644 index 0000000..b44e430 --- /dev/null +++ b/etc/rack.conf.sample @@ -0,0 +1,23 @@ +[DEFAULT] +#debug = True +#verbose = True +#rabbit_password = guest +#rabbit_host = localhost +#rpc_backend = rack.openstack.common.rpc.impl_kombu +#lock_path = /var/lib/rack/lock +#state_path = /var/lib/rack +#sql_connection = mysql://root:password@127.0.0.1/rack?charset=utf8 +#my_ip = 127.0.0.1 +#api_paste_config = /etc/api-paste.ini +#auth_strategy = noauth +#os_username = admin +#os_password = password +#os_tenant_name = demo +#os_auth_url = http://localhost:5000/v2.0 + +[keystone_authtoken] +#signing_dir = /var/cache/rack +#admin_password = password +#admin_user = rack +#admin_tenant_name = services +#auth_host = 127.0.0.1 diff --git a/openstack-common.conf b/openstack-common.conf new file mode 100644 index 0000000..d058c86 --- /dev/null +++ b/openstack-common.conf @@ -0,0 +1,47 @@ +[DEFAULT] + +# The list of modules to copy from oslo-incubator.git +module=cliutils +module=config +module=context +module=db +module=db.sqlalchemy +module=eventlet_backdoor +module=excutils +module=fileutils +module=fixture +module=gettextutils +module=imageutils +module=importutils +module=install_venv_common +module=jsonutils +module=local +module=lockutils +module=log +module=loopingcall +module=memorycache
+module=middleware/base +module=middleware/request_id +module=network_utils +module=periodic_task +module=policy +module=processutils +module=report +module=report.generators +module=report.models +module=report.views +module=report.views.xml +module=report.views.json +module=report.views.text +module=service +module=sslutils +module=strutils +module=threadgroup +module=timeutils +module=units +module=uuidutils +module=versionutils +module=xmlutils + +# The base module to hold the copy of openstack.common +base=rack \ No newline at end of file diff --git a/rack/__init__.py b/rack/__init__.py new file mode 100644 index 0000000..38c387c --- /dev/null +++ b/rack/__init__.py @@ -0,0 +1,22 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +:mod:`rack` -- Cloud IaaS Platform +=================================== + +.. automodule:: rack + :platform: Unix + :synopsis: Infrastructure-as-a-Service Cloud platform. +""" diff --git a/rack/api/__init__.py b/rack/api/__init__.py new file mode 100644 index 0000000..faf5a78 --- /dev/null +++ b/rack/api/__init__.py @@ -0,0 +1,82 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from oslo.config import cfg +import routes +import stevedore +import webob.dec +import webob.exc + +from rack.api import wsgi +from rack import exception +from rack.openstack.common import gettextutils +from rack.openstack.common.gettextutils import _ +from rack.openstack.common import log as logging +from rack import utils +from rack import wsgi as base_wsgi + +LOG = logging.getLogger(__name__) + +class FaultWrapper(base_wsgi.Middleware): + """Calls down the middleware stack, making exceptions into faults.""" + + _status_to_type = {} + + @staticmethod + def status_to_type(status): + if not FaultWrapper._status_to_type: + for clazz in utils.walk_class_hierarchy(webob.exc.HTTPError): + FaultWrapper._status_to_type[clazz.code] = clazz + return FaultWrapper._status_to_type.get( + status, webob.exc.HTTPInternalServerError)() + + def _error(self, inner, req): + LOG.exception(_("Caught error: %s"), unicode(inner)) + + safe = getattr(inner, 'safe', False) + headers = getattr(inner, 'headers', None) + status = getattr(inner, 'code', 500) + if status is None: + status = 500 + + msg_dict = dict(url=req.url, status=status) + LOG.info(_("%(url)s returned with HTTP %(status)d") % msg_dict) + outer = self.status_to_type(status) + if headers: + outer.headers = headers + # NOTE(johannes): We leave the explanation empty here on + # purpose. It could possibly have sensitive information + # that should not be returned back to the user. See + # bugs 868360 and 874472 + # NOTE(eglynn): However, it would be over-conservative and + # inconsistent with the EC2 API to hide every exception, + # including those that are safe to expose, see bug 1021373 + if safe: + if isinstance(inner.msg_fmt, gettextutils.Message): + user_locale = req.best_match_language() + inner_msg = gettextutils.translate( + inner.msg_fmt, user_locale) + else: + inner_msg = unicode(inner) + outer.explanation = '%s: %s' % (inner.__class__.__name__, + inner_msg) + + #notifications.send_api_fault(req.url, status, inner) + return wsgi.Fault(outer) + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + try: + return req.get_response(self.application) + except Exception as ex: + return self._error(ex, req) \ No newline at end of file diff --git a/rack/api/auth.py b/rack/api/auth.py new file mode 100644 index 0000000..d2a5690 --- /dev/null +++ b/rack/api/auth.py @@ -0,0 +1,188 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Common Auth Middleware. + +""" + +from oslo.config import cfg +import webob.dec +import webob.exc + +from rack import context +from rack.api import wsgi +from rack.openstack.common.gettextutils import _ +from rack.openstack.common import jsonutils +from rack.openstack.common import log as logging +from rack import wsgi as base_wsgi + + +auth_opts = [ + cfg.BoolOpt('api_rate_limit', + default=False, + help=('Whether to use per-user rate limiting for the api. 
')), + cfg.StrOpt('auth_strategy', + default='noauth', + help='The strategy to use for auth: noauth or keystone.'), + cfg.BoolOpt('use_forwarded_for', + default=False, + help='Treat X-Forwarded-For as the canonical remote address. ' + 'Only enable this if you have a sanitizing proxy.'), +] + +CONF = cfg.CONF +CONF.register_opts(auth_opts) + +LOG = logging.getLogger(__name__) + + +def _load_pipeline(loader, pipeline): + filters = [loader.get_filter(n) for n in pipeline[:-1]] + app = loader.get_app(pipeline[-1]) + filters.reverse() + for filter in filters: + app = filter(app) + return app + + +def pipeline_factory(loader, global_conf, **local_conf): + """A paste pipeline replica that keys off of auth_strategy.""" + pipeline = local_conf[CONF.auth_strategy] + if not CONF.api_rate_limit: + limit_name = CONF.auth_strategy + '_nolimit' + pipeline = local_conf.get(limit_name, pipeline) + pipeline = pipeline.split() + return _load_pipeline(loader, pipeline) + + +class InjectContext(base_wsgi.Middleware): + """Add a 'rack.context' to WSGI environ.""" + + def __init__(self, context, *args, **kwargs): + self.context = context + super(InjectContext, self).__init__(*args, **kwargs) + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + req.environ['rack.context'] = self.context + return self.application + + +class RackKeystoneContext(base_wsgi.Middleware): + """Make a request context from keystone headers.""" + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + user_id = req.headers.get('X_USER') + user_id = req.headers.get('X_USER_ID', user_id) + if user_id is None: + LOG.debug("Neither X_USER_ID nor X_USER found in request") + return webob.exc.HTTPUnauthorized() + + roles = self._get_roles(req) + + if 'X_TENANT_ID' in req.headers: + # This is the new header since Keystone went to ID/Name + project_id = req.headers['X_TENANT_ID'] + else: + # This is for legacy compatibility + project_id = req.headers['X_TENANT'] + project_name = req.headers.get('X_TENANT_NAME') + user_name = req.headers.get('X_USER_NAME') + + # Get the auth token + auth_token = req.headers.get('X_AUTH_TOKEN', + req.headers.get('X_STORAGE_TOKEN')) + + # Build a context, including the auth_token... 
+ remote_address = req.remote_addr + if CONF.use_forwarded_for: + remote_address = req.headers.get('X-Forwarded-For', remote_address) + + service_catalog = None + if req.headers.get('X_SERVICE_CATALOG') is not None: + try: + catalog_header = req.headers.get('X_SERVICE_CATALOG') + service_catalog = jsonutils.loads(catalog_header) + except ValueError: + raise webob.exc.HTTPInternalServerError( + _('Invalid service catalog json.')) + + ctx = context.RequestContext(user_id, + project_id, + user_name=user_name, + project_name=project_name, + roles=roles, + auth_token=auth_token, + remote_address=remote_address, + service_catalog=service_catalog) + + req.environ['rack.context'] = ctx + return self.application + + def _get_roles(self, req): + """Get the list of roles.""" + + if 'X_ROLES' in req.headers: + roles = req.headers.get('X_ROLES', '') + else: + # Fallback to deprecated role header: + roles = req.headers.get('X_ROLE', '') + if roles: + LOG.warn(_("Sourcing roles from deprecated X-Role HTTP " + "header")) + return [r.strip() for r in roles.split(',')] + + +class NoAuthMiddlewareBase(base_wsgi.Middleware): + """Return a fake token if one isn't specified.""" + + def base_call(self, req, project_id_in_path): + if 'X-Auth-Token' not in req.headers: + user_id = req.headers.get('X-Auth-User', 'admin') + project_id = req.headers.get('X-Auth-Project-Id', 'admin') + if project_id_in_path: + os_url = '/'.join([req.url.rstrip('/'), project_id]) + else: + os_url = req.url.rstrip('/') + res = webob.Response() + # NOTE(vish): This is expecting and returning Auth(1.1), whereas + # keystone uses 2.0 auth. We should probably allow + # 2.0 auth here as well. + res.headers['X-Auth-Token'] = '%s:%s' % (user_id, project_id) + res.headers['X-Server-Management-Url'] = os_url + res.content_type = 'text/plain' + res.status = '204' + return res + + token = req.headers['X-Auth-Token'] + user_id, _sep, project_id = token.partition(':') + project_id = project_id or user_id + remote_address = getattr(req, 'remote_address', '127.0.0.1') + if CONF.use_forwarded_for: + remote_address = req.headers.get('X-Forwarded-For', remote_address) + ctx = context.RequestContext(user_id, + project_id, + is_admin=True, + remote_address=remote_address) + + req.environ['rack.context'] = ctx + return self.application + + +class NoAuthMiddleware(NoAuthMiddlewareBase): + """Return a fake token if one isn't specified.""" + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + return self.base_call(req, True) diff --git a/rack/api/common.py b/rack/api/common.py new file mode 100644 index 0000000..bd5f466 --- /dev/null +++ b/rack/api/common.py @@ -0,0 +1,448 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import functools +import itertools +import os +import re + +from oslo.config import cfg +import six.moves.urllib.parse as urlparse +import webob +from webob import exc + +from rack.api import wsgi +from rack.api import xmlutil +from rack import exception +from rack.openstack.common.gettextutils import _ +from rack.openstack.common import log as logging + +osapi_opts = [ + cfg.IntOpt('osapi_max_limit', + default=1000, + help='The maximum number of items returned in a single ' + 'response from a collection resource'), + cfg.StrOpt('osapi_compute_link_prefix', + help='Base URL that will be presented to users in links ' + 'to the OpenStack Compute API'), + cfg.StrOpt('osapi_glance_link_prefix', + help='Base URL that will be presented to users in links ' + 'to glance resources'), +] +CONF = cfg.CONF +CONF.register_opts(osapi_opts) + +LOG = logging.getLogger(__name__) + + +VALID_NAME_REGEX = re.compile("^(?! )[\w. _-]+(?<! )$", re.UNICODE) + + +def get_pagination_params(request): + """Return marker, limit tuple from request. + + :param request: `wsgi.Request` possibly containing 'marker' and 'limit' + GET variables. 'marker' is the id of the last element + the client has seen, and 'limit' is the maximum number + of items to return. If 'limit' is not specified, 0, or + > max_limit, we default to max_limit. Negative values + for either marker or limit will cause + exc.HTTPBadRequest() exceptions to be raised. + + """ + params = {} + if 'limit' in request.GET: + params['limit'] = _get_int_param(request, 'limit') + if 'page_size' in request.GET: + params['page_size'] = _get_int_param(request, 'page_size') + if 'marker' in request.GET: + params['marker'] = _get_marker_param(request) + return params + + +def _get_int_param(request, param): + """Extract integer param from request or fail.""" + try: + int_param = int(request.GET[param]) + except ValueError: + msg = _('%s param must be an integer') % param + raise webob.exc.HTTPBadRequest(explanation=msg) + if int_param < 0: + msg = _('%s param must be positive') % param + raise webob.exc.HTTPBadRequest(explanation=msg) + return int_param + + +def _get_marker_param(request): + """Extract marker id from request or fail.""" + return request.GET['marker'] + + +def limited(items, request, max_limit=CONF.osapi_max_limit): + """Return a slice of items according to requested offset and limit. + + :param items: A sliceable entity + :param request: ``wsgi.Request`` possibly containing 'offset' and 'limit' + GET variables. 'offset' is where to start in the list, + and 'limit' is the maximum number of items to return. If + 'limit' is not specified, 0, or > max_limit, we default + to max_limit. Negative values for either offset or limit + will cause exc.HTTPBadRequest() exceptions to be raised.
+ :kwarg max_limit: The maximum number of items to return from 'items' + """ + try: + offset = int(request.GET.get('offset', 0)) + except ValueError: + msg = _('offset param must be an integer') + raise webob.exc.HTTPBadRequest(explanation=msg) + + try: + limit = int(request.GET.get('limit', max_limit)) + except ValueError: + msg = _('limit param must be an integer') + raise webob.exc.HTTPBadRequest(explanation=msg) + + if limit < 0: + msg = _('limit param must be positive') + raise webob.exc.HTTPBadRequest(explanation=msg) + + if offset < 0: + msg = _('offset param must be positive') + raise webob.exc.HTTPBadRequest(explanation=msg) + + limit = min(max_limit, limit or max_limit) + range_end = offset + limit + return items[offset:range_end] + + +def get_limit_and_marker(request, max_limit=CONF.osapi_max_limit): + """get limited parameter from request.""" + params = get_pagination_params(request) + limit = params.get('limit', max_limit) + limit = min(max_limit, limit) + marker = params.get('marker') + + return limit, marker + + +def limited_by_marker(items, request, max_limit=CONF.osapi_max_limit): + """Return a slice of items according to the requested marker and limit.""" + limit, marker = get_limit_and_marker(request, max_limit) + + limit = min(max_limit, limit) + start_index = 0 + if marker: + start_index = -1 + for i, item in enumerate(items): + if 'flavorid' in item: + if item['flavorid'] == marker: + start_index = i + 1 + break + elif item['id'] == marker or item.get('uuid') == marker: + start_index = i + 1 + break + if start_index < 0: + msg = _('marker [%s] not found') % marker + raise webob.exc.HTTPBadRequest(explanation=msg) + range_end = start_index + limit + return items[start_index:range_end] + + +def get_id_from_href(href): + """Return the id or uuid portion of a url. + + Given: 'http://www.foo.com/bar/123?q=4' + Returns: '123' + + Given: 'http://www.foo.com/bar/abc123?q=4' + Returns: 'abc123' + + """ + return urlparse.urlsplit("%s" % href).path.split('/')[-1] + + +def remove_version_from_href(href): + """Removes the first api version from the href. 
+ + Given: 'http://www.rack.com/v1.1/123' + Returns: 'http://www.rack.com/123' + + Given: 'http://www.rack.com/v1.1' + Returns: 'http://www.rack.com' + + """ + parsed_url = urlparse.urlsplit(href) + url_parts = parsed_url.path.split('/', 2) + + # NOTE: this should match vX.X or vX + expression = re.compile(r'^v([0-9]+|[0-9]+\.[0-9]+)(/.*|$)') + if expression.match(url_parts[1]): + del url_parts[1] + + new_path = '/'.join(url_parts) + + if new_path == parsed_url.path: + msg = _('href %s does not contain version') % href + LOG.debug(msg) + raise ValueError(msg) + + parsed_url = list(parsed_url) + parsed_url[2] = new_path + return urlparse.urlunsplit(parsed_url) + + +def dict_to_query_str(params): + # TODO(throughnothing): we should just use urllib.urlencode instead of this + # But currently we don't work with urlencoded url's + param_str = "" + for key, val in params.iteritems(): + param_str = param_str + '='.join([str(key), str(val)]) + '&' + + return param_str.rstrip('&') + + +def get_networks_for_instance_from_nw_info(nw_info): + networks = {} + for vif in nw_info: + ips = vif.fixed_ips() + floaters = vif.floating_ips() + label = vif['network']['label'] + if label not in networks: + networks[label] = {'ips': [], 'floating_ips': []} + + networks[label]['ips'].extend(ips) + networks[label]['floating_ips'].extend(floaters) + for ip in itertools.chain(networks[label]['ips'], + networks[label]['floating_ips']): + ip['mac_address'] = vif['address'] + return networks + + +def raise_http_conflict_for_instance_invalid_state(exc, action): + """Raises a webob.exc.HTTPConflict instance containing a message + appropriate to return via the API based on the original + InstanceInvalidState exception. + """ + attr = exc.kwargs.get('attr') + state = exc.kwargs.get('state') + not_launched = exc.kwargs.get('not_launched') + if attr and state: + msg = _("Cannot '%(action)s' while instance is in %(attr)s " + "%(state)s") % {'action': action, 'attr': attr, 'state': state} + elif not_launched: + msg = _("Cannot '%s' an instance which has never been active") % action + else: + # At least give some meaningful message + msg = _("Instance is in an invalid state for '%s'") % action + raise webob.exc.HTTPConflict(explanation=msg) + + +class MetadataDeserializer(wsgi.MetadataXMLDeserializer): + def deserialize(self, text): + dom = xmlutil.safe_minidom_parse_string(text) + metadata_node = self.find_first_child_named(dom, "metadata") + metadata = self.extract_metadata(metadata_node) + return {'body': {'metadata': metadata}} + + +class MetaItemDeserializer(wsgi.MetadataXMLDeserializer): + def deserialize(self, text): + dom = xmlutil.safe_minidom_parse_string(text) + metadata_item = self.extract_metadata(dom) + return {'body': {'meta': metadata_item}} + + +class MetadataXMLDeserializer(wsgi.XMLDeserializer): + + def extract_metadata(self, metadata_node): + """Marshal the metadata attribute of a parsed request.""" + if metadata_node is None: + return {} + metadata = {} + for meta_node in self.find_children_named(metadata_node, "meta"): + key = meta_node.getAttribute("key") + metadata[key] = self.extract_text(meta_node) + return metadata + + def _extract_metadata_container(self, datastring): + dom = xmlutil.safe_minidom_parse_string(datastring) + metadata_node = self.find_first_child_named(dom, "metadata") + metadata = self.extract_metadata(metadata_node) + return {'body': {'metadata': metadata}} + + def create(self, datastring): + return self._extract_metadata_container(datastring) + + def update_all(self, datastring): + 
return self._extract_metadata_container(datastring) + + def update(self, datastring): + dom = xmlutil.safe_minidom_parse_string(datastring) + metadata_item = self.extract_metadata(dom) + return {'body': {'meta': metadata_item}} + + +metadata_nsmap = {None: xmlutil.XMLNS_V11} + + +class MetaItemTemplate(xmlutil.TemplateBuilder): + def construct(self): + sel = xmlutil.Selector('meta', xmlutil.get_items, 0) + root = xmlutil.TemplateElement('meta', selector=sel) + root.set('key', 0) + root.text = 1 + return xmlutil.MasterTemplate(root, 1, nsmap=metadata_nsmap) + + +class MetadataTemplateElement(xmlutil.TemplateElement): + def will_render(self, datum): + return True + + +class MetadataTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = MetadataTemplateElement('metadata', selector='metadata') + elem = xmlutil.SubTemplateElement(root, 'meta', + selector=xmlutil.get_items) + elem.set('key', 0) + elem.text = 1 + return xmlutil.MasterTemplate(root, 1, nsmap=metadata_nsmap) + + +def check_snapshots_enabled(f): + @functools.wraps(f) + def inner(*args, **kwargs): + if not CONF.allow_instance_snapshots: + LOG.warn(_('Rejecting snapshot request, snapshots currently' + ' disabled')) + msg = _("Instance snapshots are not permitted at this time.") + raise webob.exc.HTTPBadRequest(explanation=msg) + return f(*args, **kwargs) + return inner + + +class ViewBuilder(object): + """Model API responses as dictionaries.""" + + def _get_project_id(self, request): + """Get project id from request url if present or empty string + otherwise + """ + project_id = request.environ["rack.context"].project_id + if project_id in request.url: + return project_id + return '' + + def _get_links(self, request, identifier, collection_name): + return [{ + "rel": "self", + "href": self._get_href_link(request, identifier, collection_name), + }, + { + "rel": "bookmark", + "href": self._get_bookmark_link(request, + identifier, + collection_name), + }] + + def _get_next_link(self, request, identifier, collection_name): + """Return href string with proper limit and marker params.""" + params = request.params.copy() + params["marker"] = identifier + prefix = self._update_compute_link_prefix(request.application_url) + url = os.path.join(prefix, + self._get_project_id(request), + collection_name) + return "%s?%s" % (url, dict_to_query_str(params)) + + def _get_href_link(self, request, identifier, collection_name): + """Return an href string pointing to this object.""" + prefix = self._update_compute_link_prefix(request.application_url) + return os.path.join(prefix, + self._get_project_id(request), + collection_name, + str(identifier)) + + def _get_bookmark_link(self, request, identifier, collection_name): + """Create a URL that refers to a specific resource.""" + base_url = remove_version_from_href(request.application_url) + base_url = self._update_compute_link_prefix(base_url) + return os.path.join(base_url, + self._get_project_id(request), + collection_name, + str(identifier)) + + def _get_collection_links(self, + request, + items, + collection_name, + id_key="uuid"): + """Retrieve 'next' link, if applicable.""" + links = [] + limit = int(request.params.get("limit", 0)) + if limit and limit == len(items): + last_item = items[-1] + if id_key in last_item: + last_item_id = last_item[id_key] + elif 'id' in last_item: + last_item_id = last_item["id"] + else: + last_item_id = last_item["flavorid"] + links.append({ + "rel": "next", + "href": self._get_next_link(request, + last_item_id, + collection_name), + }) + return links + + 
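+    # NOTE: rough sketch of how the pagination pieces in this module fit
+    # together; the URLs are only an assumed example, not a route defined
+    # here:
+    #
+    #   GET /v1/groups?limit=2           -> limited()/limited_by_marker()
+    #                                       slice the result list
+    #   GET /v1/groups?limit=2&marker=ID -> the items after the one whose
+    #                                       id (or uuid) equals ID
+    #
+    # _get_collection_links() adds a "next" link only when the page came
+    # back full (len(items) == limit), and _get_next_link() builds that link
+    # by copying the request params and setting "marker" to the id of the
+    # last item returned.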
def _update_link_prefix(self, orig_url, prefix): + if not prefix: + return orig_url + url_parts = list(urlparse.urlsplit(orig_url)) + prefix_parts = list(urlparse.urlsplit(prefix)) + url_parts[0:2] = prefix_parts[0:2] + return urlparse.urlunsplit(url_parts) + + def _update_glance_link_prefix(self, orig_url): + return self._update_link_prefix(orig_url, + CONF.osapi_glance_link_prefix) + + def _update_compute_link_prefix(self, orig_url): + return self._update_link_prefix(orig_url, + CONF.osapi_compute_link_prefix) + + +def get_instance(compute_api, context, instance_id, want_objects=False, + expected_attrs=None): + """Fetch an instance from the compute API, handling error checking.""" + try: + return compute_api.get(context, instance_id, + want_objects=want_objects, + expected_attrs=expected_attrs) + except exception.InstanceNotFound as e: + raise exc.HTTPNotFound(explanation=e.format_message()) + + +def check_cells_enabled(function): + @functools.wraps(function) + def inner(*args, **kwargs): + if not CONF.cells.enable: + msg = _("Cells is not enabled.") + raise webob.exc.HTTPNotImplemented(explanation=msg) + return function(*args, **kwargs) + return inner diff --git a/rack/api/v1/__init__.py b/rack/api/v1/__init__.py new file mode 100644 index 0000000..5ac6542 --- /dev/null +++ b/rack/api/v1/__init__.py @@ -0,0 +1,186 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +WSGI middleware for RACK API controllers. +""" + +from oslo.config import cfg +import routes +import stevedore +import webob.dec +import webob.exc + +from rack.api import wsgi +from rack.api.v1 import groups +from rack.api.v1 import networks +from rack.api.v1 import keypairs +from rack.api.v1 import securitygroups +from rack.api.v1 import processes +from rack.api import versions +from rack import exception +from rack.openstack.common import gettextutils +from rack.openstack.common.gettextutils import _ +from rack.openstack.common import log as logging +from rack import utils +from rack import wsgi as base_wsgi + +LOG = logging.getLogger(__name__) +CONF = cfg.CONF + + +class APIMapper(routes.Mapper): + def routematch(self, url=None, environ=None): + if url == "": + result = self._match("", environ) + return result[0], result[1] + return routes.Mapper.routematch(self, url, environ) + + def connect(self, *args, **kargs): + # NOTE(vish): Default the format part of a route to only accept json + # and xml so it doesn't eat all characters after a '.' + # in the url. + kargs.setdefault('requirements', {}) + if not kargs['requirements'].get('format'): + kargs['requirements']['format'] = 'json|xml' + return routes.Mapper.connect(self, *args, **kargs) + + +class APIRouter(base_wsgi.Router): + """Routes requests on the RACK API to the appropriate controller + and method. 
+ """ + @classmethod + def factory(cls, global_config, **local_config): + """Simple paste factory, :class:`rack.wsgi.Router` doesn't have one.""" + return cls() + + def __init__(self): + mapper = APIMapper() + self._setup_routes(mapper) + super(APIRouter, self).__init__(mapper) + + def _setup_routes(self, mapper): + versions_resource = versions.create_resource() + mapper.connect("/", + controller=versions_resource, + action="show", + conditions={'method': ['GET']}) + + mapper.redirect("", "/") + + groups_resource = groups.create_resource() + mapper.connect("/groups", + controller=groups_resource, + action="index", + conditions={"method": ["GET"]}) + mapper.connect("/groups/{gid}", + controller=groups_resource, + action="show", + conditions={"method": ["GET"]}) + mapper.connect("/groups", + controller=groups_resource, + action="create", + conditions={"method": ["POST"]}) + mapper.connect("/groups/{gid}", + controller=groups_resource, + action="update", + conditions={"method": ["PUT"]}) + mapper.connect("/groups/{gid}", + controller=groups_resource, + action="delete", + conditions={"method": ["DELETE"]}) + + networks_resource = networks.create_resource() + mapper.connect("/groups/{gid}/networks", + controller=networks_resource, + action="index", + conditions={"method": ["GET"]}) + mapper.connect("/groups/{gid}/networks/{network_id}", + controller=networks_resource, + action="show", + conditions={"method": ["GET"]}) + mapper.connect("/groups/{gid}/networks", + controller=networks_resource, + action="create", + conditions={"method": ["POST"]}) + mapper.connect("/groups/{gid}/networks/{network_id}", + controller=networks_resource, + action="update", + conditions={"method": ["PUT"]}) + mapper.connect("/groups/{gid}/networks/{network_id}", + controller=networks_resource, + action="delete", + conditions={"method": ["DELETE"]}) + + keypairs_resource = keypairs.create_resource() + mapper.connect("/groups/{gid}/keypairs", + controller=keypairs_resource, + action="index", + conditions={"method": ["GET"]}) + mapper.connect("/groups/{gid}/keypairs/{keypair_id}", + controller=keypairs_resource, + action="show", + conditions={"method": ["GET"]}) + mapper.connect("/groups/{gid}/keypairs", + controller=keypairs_resource, + action="create", + conditions={"method": ["POST"]}) + mapper.connect("/groups/{gid}/keypairs/{keypair_id}", + controller=keypairs_resource, + action="update", + conditions={"method": ["PUT"]}) + mapper.connect("/groups/{gid}/keypairs/{keypair_id}", + controller=keypairs_resource, + action="delete", + conditions={"method": ["DELETE"]}) + + securitygroups_resource = securitygroups.create_resource() + mapper.connect("/groups/{gid}/securitygroups", + controller=securitygroups_resource, + action="index", + conditions={"method": ["GET"]}) + mapper.connect("/groups/{gid}/securitygroups/{securitygroup_id}", + controller=securitygroups_resource, + action="show", + conditions={"method": ["GET"]}) + mapper.connect("/groups/{gid}/securitygroups", + controller=securitygroups_resource, + action="create", + conditions={"method": ["POST"]}) + mapper.connect("/groups/{gid}/securitygroups/{securitygroup_id}", + controller=securitygroups_resource, + action="update", + conditions={"method": ["PUT"]}) + mapper.connect("/groups/{gid}/securitygroups/{securitygroup_id}", + controller=securitygroups_resource, + action="delete", + conditions={"method": ["DELETE"]}) + + processes_resource = processes.create_resource() + mapper.connect("/groups/{gid}/processes", + controller=processes_resource, + action="index", + 
conditions={"method": ["GET"]}) + mapper.connect("/groups/{gid}/processes/{pid}", + controller=processes_resource, + action="show", + conditions={"method": ["GET"]}) + mapper.connect("/groups/{gid}/processes", + controller=processes_resource, + action="create", + conditions={"method": ["POST"]}) + mapper.connect("/groups/{gid}/processes/{pid}", + controller=processes_resource, + action="delete", + conditions={"method": ["DELETE"]}) diff --git a/rack/api/v1/groups.py b/rack/api/v1/groups.py new file mode 100644 index 0000000..36bf33b --- /dev/null +++ b/rack/api/v1/groups.py @@ -0,0 +1,212 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import uuid + +import six +import webob + +from rack.api.v1.views import groups as views_groups +from rack.api import wsgi +from rack import db +from rack import exception +from rack.openstack.common.gettextutils import _ +from rack.openstack.common import log as logging +from rack.openstack.common import uuidutils +from rack import utils + + +LOG = logging.getLogger(__name__) + + +class Controller(wsgi.Controller): + + """Group controller for RACK API.""" + + _view_builder_class = views_groups.ViewBuilder + + def __init__(self): + super(Controller, self).__init__() + + @wsgi.response(200) + def index(self, req): + filters = {} + project_id = req.params.get('project_id') + name = req.params.get('name') + status = req.params.get('status') + + if project_id: + filters['project_id'] = project_id + if name: + filters['display_name'] = name + if status: + filters['status'] = status + + context = req.environ['rack.context'] + group_list = db.group_get_all(context, filters) + + return self._view_builder.index(group_list) + + @wsgi.response(200) + def show(self, req, gid): + + def _validate(gid): + if not uuidutils.is_uuid_like(gid): + raise exception.GroupNotFound(gid=gid) + + try: + _validate(gid) + context = req.environ['rack.context'] + group = db.group_get_by_gid(context, gid) + except exception.NotFound: + msg = _("Group could not be found") + raise webob.exc.HTTPNotFound(explanation=msg) + + return self._view_builder.show(group) + + @wsgi.response(201) + def create(self, req, body): + + def _validate(body): + if not self.is_valid_body(body, 'group'): + msg = _("Invalid request body") + raise exception.InvalidInput(reason=msg) + + values = body["group"] + name = values.get("name") + description = values.get("description") + + if not name: + msg = _("Group name is required") + raise exception.InvalidInput(reason=msg) + + if isinstance(name, six.string_types): + name = name.strip() + utils.check_string_length(name, 'name', min_length=1, + max_length=255) + + if description: + utils.check_string_length(description, 'description', + min_length=0, max_length=255) + + valid_values = {} + valid_values["display_name"] = name + valid_values["display_description"] = description + return valid_values + + try: + values = _validate(body) + except exception.InvalidInput as exc: + raise 
webob.exc.HTTPBadRequest(explanation=exc.format_message()) + + context = req.environ['rack.context'] + values["gid"] = unicode(uuid.uuid4()) + values["user_id"] = context.user_id + values["project_id"] = context.project_id + values["status"] = "ACTIVE" + group = db.group_create(context, values) + + return self._view_builder.create(group) + + @wsgi.response(200) + def update(self, req, body, gid): + + def _validate(body, gid): + if not self.is_valid_body(body, 'group'): + msg = _("Invalid request body") + raise exception.InvalidInput(reason=msg) + + values = body["group"] + name = values.get("name") + description = values.get("description") + + if not uuidutils.is_uuid_like(gid): + raise exception.GroupNotFound(gid=gid) + + if name is None and description is None: + msg = _("Group name or description is required") + raise exception.InvalidInput(reason=msg) + + if name is not None: + if isinstance(name, six.string_types): + name = name.strip() + utils.check_string_length(name, 'name', min_length=1, + max_length=255) + + if description is not None: + utils.check_string_length(description, 'description', + min_length=0, max_length=255) + + valid_values = {} + if name: + valid_values["display_name"] = name + # allow blank string to clear description + if description is not None: + valid_values["display_description"] = description + valid_values["gid"] = gid + return valid_values + + context = req.environ['rack.context'] + + try: + values = _validate(body, gid) + group = db.group_update(context, values) + except exception.InvalidInput as exc: + raise webob.exc.HTTPBadRequest(explanation=exc.format_message()) + except exception.GroupNotFound: + msg = _("Group could not be found") + raise webob.exc.HTTPNotFound(explanation=msg) + + return self._view_builder.update(group) + + @wsgi.response(204) + def delete(self, req, gid): + + def _validate(gid): + if not uuidutils.is_uuid_like(gid): + raise exception.GroupNotFound(gid=gid) + try: + _validate(gid) + + context = req.environ['rack.context'] + + keypairs = db.keypair_get_all(context, gid) + if keypairs: + raise exception.GroupInUse(gid=gid) + + securitygroups = db.securitygroup_get_all(context, gid) + if securitygroups: + raise exception.GroupInUse(gid=gid) + + networks = db.network_get_all(context, gid) + if networks: + raise exception.GroupInUse(gid=gid) + + processes = db.process_get_all(context, gid) + if processes: + raise exception.GroupInUse(gid=gid) + + db.group_delete(context, gid) + + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.format_message()) + + except exception.GroupInUse as e: + raise webob.exc.HTTPConflict(explanation=e.format_message()) + + except Exception as e: + LOG.warn(e) + raise exception.GroupDeleteFailed() + +def create_resource(): + return wsgi.Resource(Controller()) diff --git a/rack/api/v1/keypairs.py b/rack/api/v1/keypairs.py new file mode 100644 index 0000000..3341f69 --- /dev/null +++ b/rack/api/v1/keypairs.py @@ -0,0 +1,235 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +import uuid + +import six +import webob + +from rack.api.v1.views import keypairs as views_keypairs +from rack.api import wsgi +from rack import db +from rack import exception +from rack.openstack.common.gettextutils import _ +from rack.openstack.common import log as logging +from rack.openstack.common import strutils +from rack.openstack.common import uuidutils +from rack.resourceoperator import rpcapi as operator_rpcapi +from rack.scheduler import rpcapi as scheduler_rpcapi +from rack import utils + + +LOG = logging.getLogger(__name__) + + +class Controller(wsgi.Controller): + """Keypair controller for RACK API.""" + + _view_builder_class = views_keypairs.ViewBuilder + + def __init__(self): + super(Controller, self).__init__() + self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() + self.operator_rpcapi = operator_rpcapi.ResourceOperatorAPI() + + def _uuid_check(self, gid=None, keypair_id=None): + if gid: + if not uuidutils.is_uuid_like(gid): + raise exception.GroupNotFound(gid=gid) + if keypair_id: + if not uuidutils.is_uuid_like(keypair_id): + raise exception.KeypairNotFound(keypair_id=keypair_id) + + @wsgi.response(200) + def index(self, req, gid): + try: + self._uuid_check(gid=gid) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.format_message()) + + filters = {} + keypair_id = req.params.get('keypair_id') + nova_keypair_id = req.params.get('nova_keypair_id') + name = req.params.get('name') + status = req.params.get('status') + is_default = req.params.get('is_default') + + if keypair_id: + filters['keypair_id'] = keypair_id + if nova_keypair_id: + filters['nova_keypair_id'] = nova_keypair_id + if name: + filters['display_name'] = name + if status: + filters['status'] = status + if is_default: + filters['is_default'] = is_default + + + + context = req.environ['rack.context'] + keypair_list = db.keypair_get_all(context, gid, filters) + return self._view_builder.index(keypair_list) + + @wsgi.response(200) + def show(self, req, gid, keypair_id): + try: + self._uuid_check(gid=gid, keypair_id=keypair_id) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.format_message()) + + context = req.environ['rack.context'] + try: + keypair = db.keypair_get_by_keypair_id(context, gid, keypair_id) + except exception.KeypairNotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.format_message()) + + return self._view_builder.show(keypair) + + @wsgi.response(202) + def create(self, req, body, gid): + + def _validate(body, gid): + if not self.is_valid_body(body, 'keypair'): + msg = _("Invalid request body") + raise exception.InvalidInput(reason=msg) + + self._uuid_check(gid) + values = body["keypair"] + name = values.get("name") + is_default = values.get("is_default") + + if name: + if isinstance(name, six.string_types): + name = name.strip() + utils.check_string_length(name, 'name', min_length=1, + max_length=255) + + if is_default: + try: + is_default = strutils.bool_from_string(is_default, strict=True) + except ValueError: + msg = _("is_default must be a boolean") + raise exception.InvalidInput(reason=msg) + else: + is_default = False + + valid_values = {} + valid_values["gid"] = gid + valid_values["display_name"] = name + valid_values["is_default"] = is_default + return valid_values + + try: + values = _validate(body, gid) + except exception.InvalidInput as e: + raise webob.exc.HTTPBadRequest(explanation=e.format_message()) + 
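+        # NOTE: for reference, an illustrative request body accepted by
+        # _validate() above (the field names come from the validation code,
+        # the values are made-up examples):
+        #
+        #   {"keypair": {"name": "keypair1", "is_default": "true"}}
+        #
+        # "name" is optional; when it is omitted the keypair is named
+        # "keypair-<keypair_id>" just below. "is_default" defaults to False.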
except exception.GroupNotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.format_message()) + + context = req.environ['rack.context'] + values["keypair_id"] = unicode(uuid.uuid4()) + if not values["display_name"]: + values["display_name"] = "keypair-" + values["keypair_id"] + values["user_id"] = context.user_id + values["project_id"] = context.project_id + values["status"] = "BUILDING" + + try: + db.group_get_by_gid(context, gid) + keypair = db.keypair_create(context, values) + host = self.scheduler_rpcapi.select_destinations( + context, + request_spec={}, + filter_properties={}) + self.operator_rpcapi.keypair_create( + context, + host["host"], + gid=gid, + keypair_id=values["keypair_id"], + name=values["display_name"]) + except exception.GroupNotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.format_message()) + except Exception: + keypair_id = values["keypair_id"] + db.keypair_update(context, gid, keypair_id, {"status": "ERROR"}) + raise exception.KeypairCreateFailed() + + return self._view_builder.create(keypair) + + @wsgi.response(200) + def update(self, req, body, gid, keypair_id): + + def _validate(body, gid, keypair_id): + if not self.is_valid_body(body, 'keypair'): + msg = _("Invalid request body") + raise exception.InvalidInput(reason=msg) + + self._uuid_check(gid, keypair_id) + values = body["keypair"] + is_default = values.get("is_default") + + if is_default: + try: + is_default = strutils.bool_from_string(is_default, strict=True) + except ValueError: + msg = _("is_default must be a boolean") + raise exception.InvalidInput(reason=msg) + else: + msg = _("is_default is required") + raise exception.InvalidInput(reason=msg) + + valid_values = {"is_default": is_default} + return valid_values + + context = req.environ['rack.context'] + + try: + values = _validate(body, gid, keypair_id) + keypair = db.keypair_update(context, gid, keypair_id, values) + except exception.InvalidInput as e: + raise webob.exc.HTTPBadRequest(explanation=e.format_message()) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.format_message()) + + return self._view_builder.update(keypair) + + @wsgi.response(204) + def delete(self, req, gid, keypair_id): + context = req.environ['rack.context'] + + try: + self._uuid_check(gid=gid, keypair_id=keypair_id) + filters = {"keypair_id": keypair_id} + processes = db.process_get_all(context, gid, filters=filters) + if processes: + raise exception.keypairInUse(keypair_id=keypair_id) + keypair = db.keypair_delete(context, gid, keypair_id) + host = self.scheduler_rpcapi.select_destinations( + context, + request_spec={}, + filter_properties={}) + self.operator_rpcapi.keypair_delete( + context, + host["host"], + nova_keypair_id=keypair["nova_keypair_id"]) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.format_message()) + except exception.keypairInUse as e: + raise webob.exc.HTTPConflict(explanation=e.format_message()) + except Exception as e: + LOG.warn(e) + raise exception.KeypairDeleteFailed() + +def create_resource(): + return wsgi.Resource(Controller()) diff --git a/rack/api/v1/networks.py b/rack/api/v1/networks.py new file mode 100644 index 0000000..fb18b36 --- /dev/null +++ b/rack/api/v1/networks.py @@ -0,0 +1,254 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from rack import db, utils +from rack import exception +from rack.api import wsgi +from rack.api.v1.views import networks as views_networks +from rack.openstack.common import log as logging, uuidutils, strutils +from rack.openstack.common.gettextutils import _ +from rack.resourceoperator import rpcapi as ro_rpcapi +from rack.scheduler import rpcapi as sch_rpcapi +import uuid + +import webob + + +LOG = logging.getLogger(__name__) + + +class Controller(wsgi.Controller): + + """Network controller for RACK API.""" + + _view_builder_class = views_networks.ViewBuilder + + def __init__(self): + self.scheduler_rpcapi = sch_rpcapi.SchedulerAPI() + self.resourceoperator_rpcapi = ro_rpcapi.ResourceOperatorAPI() + super(Controller, self).__init__() + + @wsgi.response(202) + def create(self, req, gid, body): + + def _validate(context, body, gid): + # validation checks + if not self.is_valid_body(body, "network"): + msg = _("Invalid request body") + raise exception.InvalidInput(reason=msg) + + values = body.get("network") + + # Required item + subnet = values.get("cidr") + if subnet is None: + msg = _("Ntwork cidr is required") + raise exception.InvalidInput(reason=msg) + if not utils.is_valid_cidr(subnet): + msg = _("cidr must be a CIDR") + raise exception.InvalidInput(reason=msg) + + # Non-essential items + network_id = unicode(uuid.uuid4()) + name = values.get("name") + if name is None or not name: + name = "net-" + network_id + else: + name = name.strip() + utils.check_string_length(name, 'name', min_length=1,max_length=255) + + is_admin = values.get("is_admin") + if is_admin: + try: + is_admin = strutils.bool_from_string( + is_admin, strict=True) + except ValueError: + msg = _("is_admin must be a boolean") + raise exception.InvalidInput(reason=msg) + else: + is_admin = False + + gateway = values.get("gateway") + if gateway is not None and not utils.is_valid_ip_address(gateway): + msg = _("Invalid gateway") + raise exception.InvalidInput(reason=msg) + + dns_nameservers = values.get("dns_nameservers") + if dns_nameservers is not None: + if isinstance(dns_nameservers, list): + for dns in dns_nameservers: + if dns == "" or not utils.is_valid_ip_address(dns): + msg = _("Invalid dns_nameservers") + raise exception.InvalidInput(reason=msg) + else: + msg = _("dns_nameservers must be list format") + raise exception.InvalidInput(reason=msg) + + ext_router = values.get("ext_router_id") + if ext_router is not None and not uuidutils.is_uuid_like(ext_router): + msg = _("ext_router must be a uuid") + raise exception.InvalidInput(reason=msg) + + valid_values1 = {} + valid_values1["network_id"] = network_id + valid_values1["gid"] = gid + valid_values1["neutron_network_id"] = None + valid_values1["is_admin"] = is_admin + valid_values1["subnet"] = subnet + valid_values1["ext_router"] = ext_router + valid_values1["user_id"] = context.user_id + valid_values1["project_id"] = context.project_id + valid_values1["display_name"] = name + valid_values1["status"] = "BUILDING" + valid_values1["deleted"] = 0 + + valid_values2 = {} + valid_values2["gateway"] = gateway + valid_values2["dns_nameservers"] = dns_nameservers 
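+            # NOTE: valid_values1 becomes the DB row (returned under "db")
+            # while valid_values2 carries the optional items that are only
+            # handed to the resource operator (returned under "opst").
+            # An illustrative request body this validation accepts (example
+            # values only, placeholders in <>):
+            #
+            #   {"network": {"cidr": "10.0.0.0/24",
+            #                "name": "network1",
+            #                "gateway": "10.0.0.1",
+            #                "dns_nameservers": ["8.8.8.8"],
+            #                "ext_router_id": "<neutron router uuid>",
+            #                "is_admin": "true"}}
+            #
+            # Only "cidr" is required; the other fields fall back to the
+            # defaults set above.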
+ + valid_values = {} + valid_values["db"] = valid_values1 + valid_values["opst"] = valid_values2 + + return valid_values + + try: + context = req.environ['rack.context'] + values = _validate(context, body, gid) + except exception.InvalidInput as e: + raise webob.exc.HTTPBadRequest(explanation=e.format_message()) + + try: + # db access + self._check_gid(gid, is_create=True, context=context) + network = db.network_create(context, values["db"]) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.format_message()) + except Exception as e: + LOG.exception(e) + raise exception.NetworkCreateFailed() + + try: + # scheduler access + resourceoperator = self._get_resorceoperator(context) + # resource operator access + for k, v in values["opst"].items(): + if v is not None: + network[k] = v + self.resourceoperator_rpcapi.network_create(context, resourceoperator["host"], network) + except Exception as e: + LOG.exception(e) + error_values = {"status": "ERROR"} + db.network_update(context, network["network_id"], error_values) + raise exception.NetworkCreateFailed() + + return self._view_builder.create(network) + + @wsgi.response(200) + def index(self, req, gid): + def _validate(gid): + self._check_gid(gid) + + try: + context = req.environ['rack.context'] + _validate(gid) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.format_message()) + + filters = {} + network_id = req.params.get('network_id') + neutron_network_id = req.params.get('neutron_network_id') + name = req.params.get('name') + status = req.params.get('status') + is_admin = req.params.get('is_admin') + subnet = req.params.get('subnet') + ext_router = req.params.get('ext_router') + + + if network_id: + filters['network_id'] = network_id + if neutron_network_id: + filters['neutron_network_id'] = neutron_network_id + if name: + filters['name'] = name + if status: + filters['status'] = status + if is_admin: + filters['is_admin'] = is_admin + if subnet: + filters['subnet'] = subnet + if ext_router: + filters['ext_router'] = ext_router + + + network_list = db.network_get_all(context, gid) + + return self._view_builder.index(network_list) + + @wsgi.response(200) + def show(self, req, gid, network_id): + def _validate(gid, network_id): + self._check_gid(gid) + if not uuidutils.is_uuid_like(network_id): + raise exception.NetworkNotFound(network_id=network_id) + + try: + context = req.environ['rack.context'] + _validate(gid, network_id) + network = db.network_get_by_network_id(context, gid, network_id) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.format_message()) + + return self._view_builder.show(network) + + @wsgi.response(204) + def delete(self, req, gid, network_id): + def _validate(gid, network_id): + self._check_gid(gid) + if not uuidutils.is_uuid_like(network_id): + raise exception.NetworkNotFound(network_id=network_id) + + try: + context = req.environ['rack.context'] + _validate(gid, network_id) + network = db.network_get_by_network_id(context, gid, network_id) + if network["processes"]: + raise exception.NetworkInUse(network_id=network_id) + network = db.network_delete(context, gid, network_id) + resourceoperator = self._get_resorceoperator(context) + self.resourceoperator_rpcapi.network_delete( + context, resourceoperator["host"], + neutron_network_id=network["neutron_network_id"], + ext_router=network["ext_router"]) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.format_message()) + except exception.NetworkInUse as e: + 
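+            # A network that still has processes attached cannot be deleted;
+            # NetworkInUse is reported back to the caller as 409 Conflict.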
raise webob.exc.HTTPConflict(explanation=e.format_message()) + except Exception as e: + LOG.exception(e) + raise exception.NetworkDeleteFailed() + + def _check_gid(self, gid, is_create=False, context=None): + if not uuidutils.is_uuid_like(gid): + raise exception.GroupNotFound(gid=gid) + if is_create: + db.group_get_by_gid(context, gid) + + def _get_resorceoperator(self, context, + request_spec={}, filter_properties={}): + resorceoperator = self.scheduler_rpcapi.select_destinations(context, request_spec, filter_properties) + return resorceoperator + + +def create_resource(): + return wsgi.Resource(Controller()) diff --git a/rack/api/v1/processes.py b/rack/api/v1/processes.py new file mode 100644 index 0000000..b04a1c1 --- /dev/null +++ b/rack/api/v1/processes.py @@ -0,0 +1,312 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import uuid +import netaddr + +import six +import webob + +from rack.api.v1.views import processes as views_processes +from rack.api import wsgi +from rack import db +from rack import exception +from rack.openstack.common.gettextutils import _ +from rack.openstack.common import log as logging +from rack.openstack.common import uuidutils +from rack.openstack.common import strutils +from rack import utils +from rack.scheduler import rpcapi as scheduler_rpcapi +from rack.resourceoperator import rpcapi as operator_rpcapi + + + +LOG = logging.getLogger(__name__) + +class Controller(wsgi.Controller): + + """Process controller for RACK API.""" + + _view_builder_class = views_processes.ViewBuilder + + def __init__(self): + super(Controller, self).__init__() + self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() + self.operator_rpcapi = operator_rpcapi.ResourceOperatorAPI() + + + @wsgi.response(200) + def index(self, req, gid): + + def _validate(gid): + if not uuidutils.is_uuid_like(gid): + raise exception.GroupNotFound(gid=gid) + + try: + _validate(gid) + except exception.ProcessNotFound: + msg = _("Process could not be found") + raise webob.exc.HTTPNotFound(explanation=msg) + + filters = {} + pid = req.params.get('pid') + ppid = req.params.get('ppid') + name = req.params.get('name') + status = req.params.get('status') + glance_image_id = req.params.get('glance_image_id') + nova_flavor_id = req.params.get('nova_flavor_id') + securitygroup_id = req.params.get('securitygroup_id') + network_id = req.params.get('network_id') + keypair_id = req.params.get('keypair_id') + + if pid: + filters['pid'] = pid + if ppid: + filters['ppid'] = ppid + if name: + filters['name'] = name + if status: + filters['status'] = status + if glance_image_id: + filters['glance_image_id'] = glance_image_id + if nova_flavor_id: + filters['nova_flavor_id'] = nova_flavor_id + if securitygroup_id: + filters['securitygroup_id'] = securitygroup_id + if network_id: + filters['network_id'] = network_id + if keypair_id: + filters['keypair_id'] = keypair_id + + context = req.environ['rack.context'] + process_list = db.process_get_all(context, gid, filters) 
+ + return self._view_builder.index(process_list) + + @wsgi.response(200) + def show(self, req, gid, pid): + + def _validate(gid, pid): + if not uuidutils.is_uuid_like(gid): + raise exception.GroupNotFound(gid=gid) + + if not uuidutils.is_uuid_like(pid): + raise exception.ProcessNotFound(pid=pid) + + try: + _validate(gid, pid) + context = req.environ['rack.context'] + process = db.process_get_by_pid(context, gid, pid) + except exception.NotFound as exc: + raise webob.exc.HTTPNotFound(explanation=exc.format_message()) + + return self._view_builder.show(process) + + + @wsgi.response(202) + def create(self, req, body, gid): + + def _validate_process(context, gid, body): + if not uuidutils.is_uuid_like(gid): + raise exception.GroupNotFound(gid=gid) + + if not self.is_valid_body(body, 'process'): + msg = _("Invalid request body") + raise exception.InvalidInput(reason=msg) + + values = body["process"] + ppid = values.get("ppid") + keypair_id = values.get("keypair_id") + name = values.get("name") + glance_image_id = values.get("glance_image_id") + nova_flavor_id = values.get("nova_flavor_id") + securitygroup_ids = values.get("securitygroup_ids") + + if ppid is not None: + if not uuidutils.is_uuid_like(ppid): + raise exception.ProcessNotFound(pid=ppid) + p_process = db.process_get_by_pid(context, gid, ppid) + + if keypair_id is not None: + if not uuidutils.is_uuid_like(keypair_id): + raise exception.KeypairNotFound(keypair_id=keypair_id) + elif ppid is not None: + keypair_id = p_process.get("keypair_id") + + if isinstance(name, six.string_types): + name = name.strip() + utils.check_string_length(name, 'name', min_length=1, + max_length=255) + elif name is not None: + msg = _("name must be a String") + raise exception.InvalidInput(reason=msg) + + if glance_image_id is None: + if ppid is not None: + glance_image_id = p_process.get("glance_image_id") + elif not uuidutils.is_uuid_like(glance_image_id): + msg = _("glance_image_id is invalid format") + raise exception.InvalidInput(reason=msg) + + if nova_flavor_id is None and ppid is not None: + nova_flavor_id = p_process.get("nova_flavor_id") + utils.validate_integer(nova_flavor_id, 'nova_flavor_id') + + if not securitygroup_ids: + if ppid is not None: + securitygroup_ids = [securitygroup.get("securitygroup_id") + for securitygroup in p_process.get("securitygroups")] + else: + msg = _("securitygroup_ids is required") + raise exception.InvalidInput(reason=msg) + + if isinstance(securitygroup_ids, list): + for securitygroup_id in securitygroup_ids: + if securitygroup_id is not None and not uuidutils.is_uuid_like(securitygroup_id): + raise exception.SecuritygroupNotFound(securitygroup_id=securitygroup_id) + else: + msg = _("securitygroup_ids must be list") + raise exception.InvalidInput(reason=msg) + + valid_values = {} + valid_values_process = {} + valid_values_process["gid"] = gid + valid_values_process["keypair_id"] = keypair_id + valid_values_process["ppid"] = ppid + valid_values_process["display_name"] = name + valid_values_process["glance_image_id"] = glance_image_id + valid_values_process["nova_flavor_id"] = nova_flavor_id + + valid_values_securitygroup = {} + valid_values_securitygroup["securitygroup_ids"] = securitygroup_ids + + valid_values["process"] = valid_values_process + valid_values["securitygroup"] = valid_values_securitygroup + return valid_values + + def _validate_metadata(metadata): + if metadata is None: + return {} + + if not isinstance(metadata, dict): + msg = _("metadata must be a dict") + raise exception.InvalidInput(reason=msg) + 
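+            # NOTE: illustrative request body for process creation (example
+            # values only; the field names come from _validate_process()
+            # above, placeholders in <>):
+            #
+            #   {"process": {"ppid": "<parent pid>",
+            #                "name": "process1",
+            #                "glance_image_id": "<glance image uuid>",
+            #                "nova_flavor_id": 1,
+            #                "keypair_id": "<keypair uuid>",
+            #                "securitygroup_ids": ["<securitygroup uuid>"],
+            #                "metadata": {"role": "worker"}}}
+            #
+            # When "ppid" is given, the image, flavor, keypair and security
+            # groups default to the parent process values; "metadata" is
+            # passed through unchanged to the resource operator.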
+ return metadata + + try: + context = req.environ['rack.context'] + valid_values = _validate_process(context, gid, body) + values = valid_values.get("process") + securitygroup_ids = valid_values.get("securitygroup").get("securitygroup_ids") + metadata = _validate_metadata( + body["process"].get("metadata")) + + values["deleted"] = 0 + values["status"] = "BUILDING" + values["pid"] = unicode(uuid.uuid4()) + values["user_id"] = context.user_id + values["project_id"] = context.project_id + values["display_name"] = values["display_name"] or "pro-" + values["pid"] + + if values["ppid"]: + db.process_get_by_pid(context, gid, values["ppid"]) + if values["keypair_id"]: + nova_keypair_id = db.keypair_get_by_keypair_id( + context, gid, values["keypair_id"]).get("nova_keypair_id") + else: + nova_keypair_id = None + networks = db.network_get_all(context, gid, {"status":"ACTIVE"}) + if not networks: + raise exception.NoNetworksFound(gid=values["gid"]) + network_ids = [network["network_id"] for network in networks] + process = db.process_create(context, values, network_ids, securitygroup_ids) + + except exception.InvalidInput as e: + raise webob.exc.HTTPBadRequest(explanation=e.format_message()) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.format_message()) + + + try: + host = self.scheduler_rpcapi.select_destinations( + context, + request_spec={}, + filter_properties={}) + self.operator_rpcapi.process_create( + context, + host["host"], + pid=values["pid"], + ppid=values["ppid"] or values["pid"], + gid=gid, + name=values["display_name"], + glance_image_id=values["glance_image_id"], + nova_flavor_id=values["nova_flavor_id"], + nova_keypair_id=nova_keypair_id, + neutron_securitygroup_ids=[securitygroup["neutron_securitygroup_id"] for securitygroup in process["securitygroups"]], + neutron_network_ids=[network["neutron_network_id"] for network in process["networks"]], + metadata=metadata) + except Exception as e: + LOG.exception(e) + pid = values["pid"] + db.process_update(context, gid, pid, {"status": "ERROR"}) + raise exception.ProcessCreateFailed() + + return self._view_builder.create(process) + + + @wsgi.response(204) + def delete(self, req, gid, pid): + + def _validate(gid, pid): + + if not uuidutils.is_uuid_like(gid): + raise exception.GroupNotFound(gid=gid) + + if not uuidutils.is_uuid_like(pid): + raise exception.ProcessNotFound(pid=pid) + + def _get_child_pid(context, gid, pid): + processes = db.process_get_all(context, gid, {"ppid": pid}) + targets=[] + for process in processes: + targets.append(process["pid"]) + targets.extend(_get_child_pid(context, gid, process["pid"])) + return targets + + try: + _validate(gid, pid) + context = req.environ['rack.context'] + targets = _get_child_pid(context, gid, pid) + targets.append(pid) + + for target in targets: + process = db.process_delete(context, gid, target) + host = self.scheduler_rpcapi.select_destinations( + context, + request_spec={}, + filter_properties={}) + self.operator_rpcapi.process_delete( + context, + host["host"], + nova_instance_id=process["nova_instance_id"]) + + except exception.NotFound as exc: + raise webob.exc.HTTPNotFound(explanation=exc.format_message()) + + except Exception as e: + LOG.exception(e) + raise exception.ProcessDeleteFailed() + +def create_resource(): + return wsgi.Resource(Controller()) diff --git a/rack/api/v1/securitygroups.py b/rack/api/v1/securitygroups.py new file mode 100644 index 0000000..5004b51 --- /dev/null +++ b/rack/api/v1/securitygroups.py @@ -0,0 +1,321 @@ +# Copyright 
(c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import uuid +import netaddr + +import six +import webob + +from rack.api.v1.views import securitygroups as views_securitygroups +from rack.api import wsgi +from rack import db +from rack import exception +from rack.openstack.common.gettextutils import _ +from rack.openstack.common import log as logging +from rack.openstack.common import uuidutils +from rack.openstack.common import strutils +from rack import utils +from rack.scheduler import rpcapi as scheduler_rpcapi +from rack.resourceoperator import rpcapi as operator_rpcapi + + + +LOG = logging.getLogger(__name__) + +class Controller(wsgi.Controller): + + """Securitygroup controller for RACK API.""" + + _view_builder_class = views_securitygroups.ViewBuilder + + def __init__(self): + super(Controller, self).__init__() + self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() + self.operator_rpcapi = operator_rpcapi.ResourceOperatorAPI() + + + @wsgi.response(200) + def index(self, req, gid): + + def _validate(gid): + if not uuidutils.is_uuid_like(gid): + raise exception.GroupNotFound(gid=gid) + + try: + _validate(gid) + except exception.SecuritygroupNotFound: + msg = _("Securitygroup could not be found") + raise webob.exc.HTTPNotFound(explanation=msg) + + filters = {} + securitygroup_id = req.params.get('securitygroup_id') + name = req.params.get('name') + status = req.params.get('status') + is_default = req.params.get('is_default') + + if securitygroup_id: + filters['securitygroup_id'] = securitygroup_id + if name: + filters['name'] = name + if status: + filters['status'] = status + if is_default: + filters['is_default'] = is_default + + context = req.environ['rack.context'] + securitygroup_list = db.securitygroup_get_all(context, gid, filters) + + return self._view_builder.index(securitygroup_list) + + @wsgi.response(200) + def show(self, req, gid, securitygroup_id): + + def _validate(gid, securitygroup_id): + if not uuidutils.is_uuid_like(gid): + raise exception.GroupNotFound(gid=gid) + + if not uuidutils.is_uuid_like(securitygroup_id): + raise exception.SecuritygroupNotFound(securitygroup_id=securitygroup_id) + + try: + _validate(gid, securitygroup_id) + context = req.environ['rack.context'] + securitygroup = db.securitygroup_get_by_securitygroup_id(context, gid, securitygroup_id) + except exception.NotFound as exc: + raise webob.exc.HTTPNotFound(explanation=exc.format_message()) + + return self._view_builder.show(securitygroup) + + + @wsgi.response(202) + def create(self, req, body, gid): + + def _validate_securitygroup(gid, body): + if not uuidutils.is_uuid_like(gid): + raise exception.GroupNotFound(gid=gid) + + if not self.is_valid_body(body, 'securitygroup'): + msg = _("Invalid request body") + raise exception.InvalidInput(reason=msg) + + values = body["securitygroup"] + name = values.get("name") + is_default = values.get("is_default") + + if isinstance(name, six.string_types): + name = name.strip() + utils.check_string_length(name, 'name', 
min_length=1, + max_length=255) + + if is_default: + try: + is_default = strutils.bool_from_string(is_default, strict=True) + except ValueError: + msg = _("is_default must be a boolean") + raise exception.InvalidInput(reason=msg) + else: + is_default = False + + valid_values = {} + valid_values["gid"] = gid + valid_values["display_name"] = name + valid_values["is_default"] = is_default + return valid_values + + def _validate_securitygrouprules(securitygrouprules): + + valid_securitygrouprules = [] + for securitygroup in securitygrouprules: + protocol = securitygroup.get("protocol") + port_range_max = securitygroup.get("port_range_max") + port_range_min = securitygroup.get("port_range_min") + remote_securitygroup_id = securitygroup.get("remote_securitygroup_id") + remote_ip_prefix = securitygroup.get("remote_ip_prefix") + + if not protocol: + msg = _("SecurityGroupRule protocol is required") + raise exception.InvalidInput(reason=msg) + elif not utils.is_valid_protocol(protocol): + msg = _("SecurityGroupRule protocol should be tcp or udp or icmp") + raise exception.InvalidInput(reason=msg) + + if not remote_securitygroup_id and not remote_ip_prefix: + msg = _("SecurityGroupRule either remote_securitygroup_id or remote_ip_prefix is required") + raise exception.InvalidInput(reason=msg) + elif remote_securitygroup_id and remote_ip_prefix: + msg = _("SecurityGroupRule either remote_securitygroup_id or remote_ip_prefix is required") + raise exception.InvalidInput(reason=msg) + elif remote_securitygroup_id is not None: + if not uuidutils.is_uuid_like(remote_securitygroup_id): + raise exception.SecuritygroupNotFound(securitygroup_id=remote_securitygroup_id) + elif remote_ip_prefix is not None: + if not utils.is_valid_cidr(remote_ip_prefix): + msg = _("SecurityGroupRule remote_ip_prefix should be cidr format") + raise exception.InvalidInput(reason=msg) + + if protocol in ["tcp","udp"]: + if port_range_max is None: + msg = _("SecurityGroupRule port_range_max is required") + raise exception.InvalidInput(reason=msg) + utils.validate_integer(port_range_max, 'port_range_max', min_value=1, max_value=65535) + if port_range_min: + utils.validate_integer(port_range_min, 'port_range_min', min_value=1, max_value=65535) + if port_range_min > port_range_max: + msg = _("SecurityGroupRule port_range_min should be lower than port_range_max") + raise exception.InvalidInput(reason=msg) + elif protocol == "icmp": + port_range_max = None + port_range_min = None + + valid_securitygrouprules.append({ + "protocol":protocol, + "port_range_max":port_range_max, + "port_range_min":port_range_min, + "remote_securitygroup_id":remote_securitygroup_id, + "remote_ip_prefix": unicode(netaddr.IPNetwork(remote_ip_prefix)) if remote_ip_prefix else remote_ip_prefix + }) + return valid_securitygrouprules + + try: + context = req.environ['rack.context'] + values = _validate_securitygroup(gid, body) + if(body["securitygroup"].get("securitygrouprules")): + securitygrouprules = _validate_securitygrouprules( + body["securitygroup"].get("securitygrouprules")) + else: + securitygrouprules = [] + except exception.InvalidInput as exc: + raise webob.exc.HTTPBadRequest(explanation=exc.format_message()) + except exception.NotFound as exc: + raise webob.exc.HTTPNotFound(explanation=exc.format_message()) + + values["deleted"] = 0 + values["status"] = "BUILDING" + values["securitygroup_id"] = unicode(uuid.uuid4()) + values["user_id"] = context.user_id + values["project_id"] = context.project_id + values["display_name"] = values["display_name"] or 
"sec-" + values["securitygroup_id"] + + try: + for i in range(len(securitygrouprules)): + if securitygrouprules[i]["remote_securitygroup_id"]: + securitygroup = db.securitygroup_get_by_securitygroup_id( + context, gid, securitygrouprules[i]["remote_securitygroup_id"]) + remote_neutron_securitygroup_id = securitygroup.get("neutron_securitygroup_id") + securitygrouprules[i]["remote_neutron_securitygroup_id"] = remote_neutron_securitygroup_id + db.group_get_by_gid(context, gid) + securitygroup = db.securitygroup_create(context, values) + except exception.NotFound as exc: + raise webob.exc.HTTPNotFound(explanation=exc.format_message()) + + try: + host = self.scheduler_rpcapi.select_destinations( + context, + request_spec={}, + filter_properties={}) + self.operator_rpcapi.securitygroup_create( + context, + host["host"], + gid=gid, + securitygroup_id=values["securitygroup_id"], + name=values["display_name"], + securitygrouprules=securitygrouprules) + except Exception: + securitygroup_id = values["securitygroup_id"] + db.securitygroup_update(context, gid, securitygroup_id, {"status": "ERROR"}) + raise exception.SecuritygroupCreateFailed() + + return self._view_builder.create(securitygroup) + + @wsgi.response(200) + def update(self, req, body, gid, securitygroup_id): + + def _validate(body, gid, securitygroup_id): + if not self.is_valid_body(body, 'securitygroup'): + msg = _("Invalid request body") + raise exception.InvalidInput(reason=msg) + + values = body["securitygroup"] + is_default = values.get("is_default") + + if not uuidutils.is_uuid_like(gid): + raise exception.GroupNotFound(gid=gid) + + if not uuidutils.is_uuid_like(securitygroup_id): + raise exception.SecuritygroupNotFound(securitygroup_id=securitygroup_id) + + if is_default: + try: + is_default = strutils.bool_from_string(is_default, strict=True) + except ValueError: + msg = _("is_default must be a boolean") + raise exception.InvalidInput(reason=msg) + else: + msg = _("SecurityGroup is_default is required") + raise exception.InvalidInput(reason=msg) + + + valid_values = {} + valid_values["is_default"] = is_default + return valid_values + + + try: + values = _validate(body, gid, securitygroup_id) + context = req.environ['rack.context'] + securitygroup = db.securitygroup_update(context, gid, securitygroup_id, values) + except exception.InvalidInput as exc: + raise webob.exc.HTTPBadRequest(explanation=exc.format_message()) + except exception.NotFound as exc: + raise webob.exc.HTTPNotFound(explanation=exc.format_message()) + + return self._view_builder.update(securitygroup) + + @wsgi.response(204) + def delete(self, req, gid, securitygroup_id): + + def _validate(gid, securitygroup_id): + + if not uuidutils.is_uuid_like(gid): + raise exception.GroupNotFound(gid=gid) + + if not uuidutils.is_uuid_like(securitygroup_id): + raise exception.SecuritygroupNotFound(securitygroup_id=securitygroup_id) + + try: + _validate(gid, securitygroup_id) + context = req.environ['rack.context'] + securitygroup = db.securitygroup_get_by_securitygroup_id(context, gid, securitygroup_id) + if securitygroup["processes"]: + raise exception.SecuritygroupInUse(securitygroup_id=securitygroup_id) + securitygroup = db.securitygroup_delete(context, gid, securitygroup_id) + except exception.SecuritygroupInUse as exc: + raise webob.exc.HTTPConflict(explanation=exc.format_message()) + except exception.NotFound as exc: + raise webob.exc.HTTPNotFound(explanation=exc.format_message()) + + try: + host = self.scheduler_rpcapi.select_destinations( + context, + request_spec={}, + 
filter_properties={}) + self.operator_rpcapi.securitygroup_delete( + context, + host["host"], + neutron_securitygroup_id=securitygroup["neutron_securitygroup_id"]) + except Exception: + raise exception.SecuritygroupDeleteFailed() + +def create_resource(): + return wsgi.Resource(Controller()) diff --git a/rack/api/v1/views/__init__.py b/rack/api/v1/views/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/rack/api/v1/views/groups.py b/rack/api/v1/views/groups.py new file mode 100644 index 0000000..7b60d3a --- /dev/null +++ b/rack/api/v1/views/groups.py @@ -0,0 +1,49 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from rack.api import common +from rack.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +class ViewBuilder(common.ViewBuilder): + """Model a group API response as a python dictionary.""" + + def index(self, group_list): + return dict(groups= + [self._base_response(group) + for group in group_list]) + + def show(self, group): + base = self._base_response(group) + return dict(group=base) + + def create(self, group): + base = self._base_response(group) + return dict(group=base) + + def update(self, group): + base = self._base_response(group) + return dict(group=base) + + def _base_response(self, group): + return { + "gid": group["gid"], + "user_id": group["user_id"], + "project_id": group["project_id"], + "name": group["display_name"], + "description": group["display_description"], + "status": group["status"] + } + diff --git a/rack/api/v1/views/keypairs.py b/rack/api/v1/views/keypairs.py new file mode 100644 index 0000000..9460109 --- /dev/null +++ b/rack/api/v1/views/keypairs.py @@ -0,0 +1,51 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from rack.api import common +from rack.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +class ViewBuilder(common.ViewBuilder): + """Model a keypair API response as a python dictionary.""" + + def index(self, keypair_list): + return dict(keypairs= + [self._base_response(keypair) + for keypair in keypair_list]) + + def show(self, keypair): + base = self._base_response(keypair) + return dict(keypair=base) + + def create(self, keypair): + base = self._base_response(keypair) + return dict(keypair=base) + + def update(self, keypair): + base = self._base_response(keypair) + return dict(keypair=base) + + def _base_response(self, keypair): + return { + "keypair_id": keypair.get("keypair_id", ""), + "nova_keypair_id": keypair.get("nova_keypair_id", ""), + "user_id": keypair.get("user_id", ""), + "project_id": keypair.get("project_id", ""), + "gid": keypair.get("gid", ""), + "name": keypair.get("display_name", ""), + "private_key": keypair.get("private_key", ""), + "is_default": keypair.get("is_default", ""), + "status": keypair.get("status", "") + } diff --git a/rack/api/v1/views/networks.py b/rack/api/v1/views/networks.py new file mode 100644 index 0000000..8301bd9 --- /dev/null +++ b/rack/api/v1/views/networks.py @@ -0,0 +1,47 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from rack.api import common +from rack.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +class ViewBuilder(common.ViewBuilder): + """Model a networks API response as a python dictionary.""" + + def index(self, network_list): + return dict(networks=[ + self._base_response(network) for network in network_list]) + + def show(self, network): + base = self._base_response(network) + return dict(network=base) + + def create(self, network): + base = self._base_response(network) + return dict(network=base) + + def _base_response(self, network): + return { + "network_id": network["network_id"], + "neutron_network_id": network["neutron_network_id"], + "gid": network["gid"], + "user_id": network["user_id"], + "project_id": network["project_id"], + "name": network["display_name"], + "is_admin": network["is_admin"], + "cidr": network["subnet"], + "ext_router_id": network["ext_router"], + "status": network["status"] + } diff --git a/rack/api/v1/views/processes.py b/rack/api/v1/views/processes.py new file mode 100644 index 0000000..113dfa5 --- /dev/null +++ b/rack/api/v1/views/processes.py @@ -0,0 +1,56 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +from rack.api import common +from rack.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +class ViewBuilder(common.ViewBuilder): + + """Model a process API response as a python dictionary.""" + def index(self, process_list): + return dict(processes= + [self._base_response(process) + for process in process_list]) + + def show(self, process): + base = self._base_response(process) + return dict(process=base) + + def create(self, process): + base = self._base_response(process) + return dict(process=base) + + def update(self, process): + base = self._base_response(process) + return dict(process=base) + + def _base_response(self, process): + return { + "gid": process.get("gid"), + "pid": process.get("pid"), + "ppid": process.get("ppid", ""), + "user_id": process.get("user_id"), + "project_id": process.get("project_id"), + "name": process.get("display_name"), + "glance_image_id": process.get("glance_image_id"), + "nova_flavor_id": process.get("nova_flavor_id"), + "status": process.get("status"), + "keypair_id": process.get("keypair_id"), + "network_ids": [network.get("network_id") + for network in process.get("networks")], + "securitygroup_ids": [securitygroup.get("securitygroup_id") + for securitygroup in process.get("securitygroups")], + } diff --git a/rack/api/v1/views/securitygroups.py b/rack/api/v1/views/securitygroups.py new file mode 100644 index 0000000..d16feb8 --- /dev/null +++ b/rack/api/v1/views/securitygroups.py @@ -0,0 +1,50 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from rack.api import common +from rack.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +class ViewBuilder(common.ViewBuilder): + + """Model a securitygroup API response as a python dictionary.""" + def index(self, securitygroup_list): + return dict(securitygroups= + [self._base_response(securitygroup) + for securitygroup in securitygroup_list]) + + def show(self, securitygroup): + base = self._base_response(securitygroup) + return dict(securitygroup=base) + + def create(self, securitygroup): + base = self._base_response(securitygroup) + return dict(securitygroup=base) + + def update(self, securitygroup): + base = self._base_response(securitygroup) + return dict(securitygroup=base) + + def _base_response(self, securitygroup): + return { + "securitygroup_id": securitygroup.get("securitygroup_id"), + "neutron_securitygroup_id": securitygroup.get("neutron_securitygroup_id"), + "user_id": securitygroup.get("user_id"), + "project_id": securitygroup.get("project_id"), + "gid": securitygroup.get("gid"), + "name": securitygroup.get("display_name"), + "is_default": securitygroup.get("is_default"), + "status": securitygroup.get("status") + } \ No newline at end of file diff --git a/rack/api/versions.py b/rack/api/versions.py new file mode 100644 index 0000000..24ef99f --- /dev/null +++ b/rack/api/versions.py @@ -0,0 +1,243 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
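The processes view builder earlier in this patch flattens the joined `networks` and `securitygroups` rows into plain ID lists before returning the response body. A small sketch of that flattening with a hypothetical process record (the IDs are made up for illustration):

```python
# Hypothetical process record with joined child rows (values are illustrative).
process_record = {
    "pid": "pid-1",
    "networks": [{"network_id": "net-1"}, {"network_id": "net-2"}],
    "securitygroups": [{"securitygroup_id": "sg-1"}],
}

# Same list comprehensions the processes ViewBuilder uses to expose only IDs.
network_ids = [n.get("network_id")
               for n in process_record.get("networks")]
securitygroup_ids = [s.get("securitygroup_id")
                     for s in process_record.get("securitygroups")]

print(network_ids)        # ['net-1', 'net-2']
print(securitygroup_ids)  # ['sg-1']
```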
+ +from lxml import etree +from oslo.config import cfg + +from rack.api.views import versions as views_versions +from rack.api import wsgi +from rack.api import xmlutil +from rack.openstack.common import timeutils + + +CONF = cfg.CONF + +LINKS = { + 'v2.0': { + 'pdf': 'http://docs.openstack.org/' + 'api/openstack-compute/2/os-compute-devguide-2.pdf', + 'wadl': 'http://docs.openstack.org/' + 'api/openstack-compute/2/wadl/os-compute-2.wadl' + }, +} + + +VERSIONS = { + "v2.0": { + "id": "v2.0", + "status": "CURRENT", + "updated": "2011-01-21T11:33:21Z", + "links": [ + { + "rel": "describedby", + "type": "application/pdf", + "href": LINKS['v2.0']['pdf'], + }, + { + "rel": "describedby", + "type": "application/vnd.sun.wadl+xml", + "href": LINKS['v2.0']['wadl'], + }, + ], + "media-types": [ + { + "base": "application/xml", + "type": "application/vnd.openstack.compute+xml;version=2", + }, + { + "base": "application/json", + "type": "application/vnd.openstack.compute+json;version=2", + } + ], + }, +} + + +class MediaTypesTemplateElement(xmlutil.TemplateElement): + def will_render(self, datum): + return 'media-types' in datum + + +def make_version(elem): + elem.set('id') + elem.set('status') + elem.set('updated') + + mts = MediaTypesTemplateElement('media-types') + elem.append(mts) + + mt = xmlutil.SubTemplateElement(mts, 'media-type', selector='media-types') + mt.set('base') + mt.set('type') + + xmlutil.make_links(elem, 'links') + + +version_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM} + + +class VersionTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('version', selector='version') + make_version(root) + return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap) + + +class VersionsTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('versions') + elem = xmlutil.SubTemplateElement(root, 'version', selector='versions') + make_version(elem) + return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap) + + +class ChoicesTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('choices') + elem = xmlutil.SubTemplateElement(root, 'version', selector='choices') + make_version(elem) + return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap) + + +class AtomSerializer(wsgi.XMLDictSerializer): + + NSMAP = {None: xmlutil.XMLNS_ATOM} + + def __init__(self, metadata=None, xmlns=None): + self.metadata = metadata or {} + if not xmlns: + self.xmlns = wsgi.XMLNS_ATOM + else: + self.xmlns = xmlns + + def _get_most_recent_update(self, versions): + recent = None + for version in versions: + updated = timeutils.parse_strtime(version['updated'], + '%Y-%m-%dT%H:%M:%SZ') + if not recent: + recent = updated + elif updated > recent: + recent = updated + + return recent.strftime('%Y-%m-%dT%H:%M:%SZ') + + def _get_base_url(self, link_href): + # Make sure no trailing / + link_href = link_href.rstrip('/') + return link_href.rsplit('/', 1)[0] + '/' + + def _create_feed(self, versions, feed_title, feed_id): + feed = etree.Element('feed', nsmap=self.NSMAP) + title = etree.SubElement(feed, 'title') + title.set('type', 'text') + title.text = feed_title + + # Set this updated to the most recently updated version + recent = self._get_most_recent_update(versions) + etree.SubElement(feed, 'updated').text = recent + + etree.SubElement(feed, 'id').text = feed_id + + link = etree.SubElement(feed, 'link') + link.set('rel', 'self') + link.set('href', feed_id) + + author = etree.SubElement(feed, 
'author') + etree.SubElement(author, 'name').text = 'Rackspace' + etree.SubElement(author, 'uri').text = 'http://www.rackspace.com/' + + for version in versions: + feed.append(self._create_version_entry(version)) + + return feed + + def _create_version_entry(self, version): + entry = etree.Element('entry') + etree.SubElement(entry, 'id').text = version['links'][0]['href'] + title = etree.SubElement(entry, 'title') + title.set('type', 'text') + title.text = 'Version %s' % version['id'] + etree.SubElement(entry, 'updated').text = version['updated'] + + for link in version['links']: + link_elem = etree.SubElement(entry, 'link') + link_elem.set('rel', link['rel']) + link_elem.set('href', link['href']) + if 'type' in link: + link_elem.set('type', link['type']) + + content = etree.SubElement(entry, 'content') + content.set('type', 'text') + content.text = 'Version %s %s (%s)' % (version['id'], + version['status'], + version['updated']) + return entry + + +class VersionsAtomSerializer(AtomSerializer): + def default(self, data): + versions = data['versions'] + feed_id = self._get_base_url(versions[0]['links'][0]['href']) + feed = self._create_feed(versions, 'Available API Versions', feed_id) + return self._to_xml(feed) + + +class VersionAtomSerializer(AtomSerializer): + def default(self, data): + version = data['version'] + feed_id = version['links'][0]['href'] + feed = self._create_feed([version], 'About This Version', feed_id) + return self._to_xml(feed) + + +class Versions(wsgi.Resource): + def __init__(self): + super(Versions, self).__init__(None) + + @wsgi.serializers(xml=VersionsTemplate, + atom=VersionsAtomSerializer) + def index(self, req): + """Return all versions.""" + builder = views_versions.get_view_builder(req) + return builder.build_versions(VERSIONS) + + @wsgi.serializers(xml=ChoicesTemplate) + @wsgi.response(300) + def multi(self, req): + """Return multiple choices.""" + builder = views_versions.get_view_builder(req) + return builder.build_choices(VERSIONS, req) + + def get_action_args(self, request_environment): + """Parse dictionary created by routes library.""" + args = {} + if request_environment['PATH_INFO'] == '/': + args['action'] = 'index' + else: + args['action'] = 'multi' + + return args + + +class VersionV2(object): + @wsgi.serializers(xml=VersionTemplate, + atom=VersionAtomSerializer) + def show(self, req): + builder = views_versions.get_view_builder(req) + return builder.build_version(VERSIONS['v2.0']) + + +def create_resource(): + return wsgi.Resource(VersionV2()) diff --git a/rack/api/views/__init__.py b/rack/api/views/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/rack/api/views/versions.py b/rack/api/views/versions.py new file mode 100644 index 0000000..cdb780b --- /dev/null +++ b/rack/api/views/versions.py @@ -0,0 +1,96 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
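The Atom serializer above picks the newest `updated` timestamp across all version entries and uses it as the feed-level `<updated>` value. A standalone sketch of that selection, using `datetime.strptime` in place of `timeutils.parse_strtime` (same format string); the second version entry is made up for the example:

```python
from datetime import datetime

# Hypothetical version metadata; only the 'updated' fields matter here.
versions = [
    {"id": "v2.0", "updated": "2011-01-21T11:33:21Z"},
    {"id": "v2.1", "updated": "2014-07-09T00:00:00Z"},  # made-up entry
]

FORMAT = '%Y-%m-%dT%H:%M:%SZ'

# Mirrors AtomSerializer._get_most_recent_update(): parse each timestamp,
# keep the latest, and re-serialize it with the same format.
recent = max(datetime.strptime(v["updated"], FORMAT) for v in versions)
print(recent.strftime(FORMAT))  # 2014-07-09T00:00:00Z
```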
+ +import copy +import os + +from rack.api import common + + +def get_view_builder(req): + base_url = req.application_url + return ViewBuilder(base_url) + + +class ViewBuilder(common.ViewBuilder): + + def __init__(self, base_url): + """:param base_url: url of the root wsgi application.""" + self.base_url = base_url + + def build_choices(self, VERSIONS, req): + version_objs = [] + for version in VERSIONS: + version = VERSIONS[version] + version_objs.append({ + "id": version['id'], + "status": version['status'], + "links": [ + { + "rel": "self", + "href": self.generate_href(version['id'], req.path), + }, + ], + "media-types": version['media-types'], + }) + + return dict(choices=version_objs) + + def build_versions(self, versions): + version_objs = [] + for version in sorted(versions.keys()): + version = versions[version] + version_objs.append({ + "id": version['id'], + "status": version['status'], + "updated": version['updated'], + "links": self._build_links(version), + }) + + return dict(versions=version_objs) + + def build_version(self, version): + reval = copy.deepcopy(version) + reval['links'].insert(0, { + "rel": "self", + "href": self.base_url.rstrip('/') + '/', + }) + return dict(version=reval) + + def _build_links(self, version_data): + """Generate a container of links that refer to the provided version.""" + href = self.generate_href(version_data['id']) + + links = [ + { + "rel": "self", + "href": href, + }, + ] + + return links + + def generate_href(self, version, path=None): + """Create an url that refers to a specific version_number.""" + prefix = self._update_compute_link_prefix(self.base_url) + if version.find('v3.') == 0: + version_number = 'v3' + else: + version_number = 'v2' + + if path: + path = path.strip('/') + return os.path.join(prefix, version_number, path) + else: + return os.path.join(prefix, version_number) + '/' diff --git a/rack/api/wsgi.py b/rack/api/wsgi.py new file mode 100644 index 0000000..14581e1 --- /dev/null +++ b/rack/api/wsgi.py @@ -0,0 +1,1302 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
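`generate_href()` above builds version-prefixed URLs with `os.path.join`, defaulting to the `v2` prefix unless the version id starts with `v3.`. A simplified sketch of that URL construction; it replaces the `_update_compute_link_prefix()` call from `rack.api.common` with a plain `rstrip('/')`, assuming the base URL already points at the API root:

```python
import os


def generate_href(base_url, version, path=None):
    # Simplified version of ViewBuilder.generate_href(): pick the URL prefix
    # from the version id, then append the optional request path.
    prefix = base_url.rstrip('/')
    version_number = 'v3' if version.startswith('v3.') else 'v2'
    if path:
        return os.path.join(prefix, version_number, path.strip('/'))
    return os.path.join(prefix, version_number) + '/'

print(generate_href("http://localhost:8088/", "v2.0"))          # .../v2/
print(generate_href("http://localhost:8088/", "v2.0", "/foo"))  # .../v2/foo
```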
+ +import inspect +import math +import time +from xml.dom import minidom + +from lxml import etree +import six +import webob + +from rack.api import xmlutil +from rack import exception +from rack.openstack.common import gettextutils +from rack.openstack.common.gettextutils import _ +from rack.openstack.common import jsonutils +from rack.openstack.common import log as logging +from rack import utils +from rack import wsgi + + +XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0' +XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1' + +XMLNS_ATOM = 'http://www.w3.org/2005/Atom' + +LOG = logging.getLogger(__name__) + +SUPPORTED_CONTENT_TYPES = ( + 'application/json', + 'application/vnd.openstack.compute+json', + 'application/xml', + 'application/vnd.openstack.compute+xml', +) + +_MEDIA_TYPE_MAP = { + 'application/vnd.openstack.compute+json': 'json', + 'application/json': 'json', + 'application/vnd.openstack.compute+xml': 'xml', + 'application/xml': 'xml', + 'application/atom+xml': 'atom', +} + +_ROUTES_METHODS = [ + 'create', + 'delete', + 'show', + 'update', +] + +_METHODS_WITH_BODY = [ + 'POST', + 'PUT', +] + + +class Request(webob.Request): + """Add some OpenStack API-specific logic to the base webob.Request.""" + + def __init__(self, *args, **kwargs): + super(Request, self).__init__(*args, **kwargs) + self._extension_data = {'db_items': {}} + + def cache_db_items(self, key, items, item_key='id'): + """Allow API methods to store objects from a DB query to be + used by API extensions within the same API request. + + An instance of this class only lives for the lifetime of a + single API request, so there's no need to implement full + cache management. + """ + db_items = self._extension_data['db_items'].setdefault(key, {}) + for item in items: + db_items[item[item_key]] = item + + def get_db_items(self, key): + """Allow an API extension to get previously stored objects within + the same API request. + + Note that the object data will be slightly stale. + """ + return self._extension_data['db_items'][key] + + def get_db_item(self, key, item_key): + """Allow an API extension to get a previously stored object + within the same API request. + + Note that the object data will be slightly stale. 
+ """ + return self.get_db_items(key).get(item_key) + + def cache_db_instances(self, instances): + self.cache_db_items('instances', instances, 'uuid') + + def cache_db_instance(self, instance): + self.cache_db_items('instances', [instance], 'uuid') + + def get_db_instances(self): + return self.get_db_items('instances') + + def get_db_instance(self, instance_uuid): + return self.get_db_item('instances', instance_uuid) + + def cache_db_flavors(self, flavors): + self.cache_db_items('flavors', flavors, 'flavorid') + + def cache_db_flavor(self, flavor): + self.cache_db_items('flavors', [flavor], 'flavorid') + + def get_db_flavors(self): + return self.get_db_items('flavors') + + def get_db_flavor(self, flavorid): + return self.get_db_item('flavors', flavorid) + + def cache_db_compute_nodes(self, compute_nodes): + self.cache_db_items('compute_nodes', compute_nodes, 'id') + + def cache_db_compute_node(self, compute_node): + self.cache_db_items('compute_nodes', [compute_node], 'id') + + def get_db_compute_nodes(self): + return self.get_db_items('compute_nodes') + + def get_db_compute_node(self, id): + return self.get_db_item('compute_nodes', id) + + def best_match_content_type(self): + """Determine the requested response content-type.""" + if 'rack.best_content_type' not in self.environ: + # Calculate the best MIME type + content_type = None + + # Check URL path suffix + parts = self.path.rsplit('.', 1) + if len(parts) > 1: + possible_type = 'application/' + parts[1] + if possible_type in SUPPORTED_CONTENT_TYPES: + content_type = possible_type + + if not content_type: + content_type = self.accept.best_match(SUPPORTED_CONTENT_TYPES) + + self.environ['rack.best_content_type'] = (content_type or + 'application/json') + + return self.environ['rack.best_content_type'] + + def get_content_type(self): + """Determine content type of the request body. + + Does not do any body introspection, only checks header + + """ + if "Content-Type" not in self.headers: + return None + + content_type = self.content_type + + # NOTE(markmc): text/plain is the default for eventlet and + # other webservers which use mimetools.Message.gettype() + # whereas twisted defaults to ''. + if not content_type or content_type == 'text/plain': + return None + + if content_type not in SUPPORTED_CONTENT_TYPES: + raise exception.InvalidContentType(content_type=content_type) + + return content_type + + def best_match_language(self): + """Determine the best available language for the request. + + :returns: the best language match or None if the 'Accept-Language' + header was not available in the request. 
+ """ + if not self.accept_language: + return None + return self.accept_language.best_match( + gettextutils.get_available_languages('rack')) + + +class ActionDispatcher(object): + """Maps method name to local methods through action name.""" + + def dispatch(self, *args, **kwargs): + """Find and call local method.""" + action = kwargs.pop('action', 'default') + action_method = getattr(self, str(action), self.default) + return action_method(*args, **kwargs) + + def default(self, data): + raise NotImplementedError() + + +class TextDeserializer(ActionDispatcher): + """Default request body deserialization.""" + + def deserialize(self, datastring, action='default'): + return self.dispatch(datastring, action=action) + + def default(self, datastring): + return {} + + +class JSONDeserializer(TextDeserializer): + + def _from_json(self, datastring): + try: + return jsonutils.loads(datastring) + except ValueError: + msg = _("cannot understand JSON") + raise exception.MalformedRequestBody(reason=msg) + + def default(self, datastring): + return {'body': self._from_json(datastring)} + + +class XMLDeserializer(TextDeserializer): + + def __init__(self, metadata=None): + """:param metadata: information needed to deserialize xml into + a dictionary. + """ + super(XMLDeserializer, self).__init__() + self.metadata = metadata or {} + + def _from_xml(self, datastring): + plurals = set(self.metadata.get('plurals', {})) + node = xmlutil.safe_minidom_parse_string(datastring).childNodes[0] + return {node.nodeName: self._from_xml_node(node, plurals)} + + def _from_xml_node(self, node, listnames): + """Convert a minidom node to a simple Python type. + + :param listnames: list of XML node names whose subnodes should + be considered list items. + + """ + if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3: + return node.childNodes[0].nodeValue + elif node.nodeName in listnames: + return [self._from_xml_node(n, listnames) for n in node.childNodes] + else: + result = dict() + for attr in node.attributes.keys(): + if not attr.startswith("xmlns"): + result[attr] = node.attributes[attr].nodeValue + for child in node.childNodes: + if child.nodeType != node.TEXT_NODE: + result[child.nodeName] = self._from_xml_node(child, + listnames) + return result + + def find_first_child_named_in_namespace(self, parent, namespace, name): + """Search a nodes children for the first child with a given name.""" + for node in parent.childNodes: + if (node.localName == name and + node.namespaceURI and + node.namespaceURI == namespace): + return node + return None + + def find_first_child_named(self, parent, name): + """Search a nodes children for the first child with a given name.""" + for node in parent.childNodes: + if node.localName == name: + return node + return None + + def find_children_named(self, parent, name): + """Return all of a nodes children who have the given name.""" + for node in parent.childNodes: + if node.localName == name: + yield node + + def extract_text(self, node): + """Get the text field contained by the given node.""" + ret_val = "" + for child in node.childNodes: + if child.nodeType == child.TEXT_NODE: + ret_val += child.nodeValue + return ret_val + + def extract_elements(self, node): + """Get only Element type childs from node.""" + elements = [] + for child in node.childNodes: + if child.nodeType == child.ELEMENT_NODE: + elements.append(child) + return elements + + def find_attribute_or_element(self, parent, name): + """Get an attribute value; fallback to an element if not found.""" + if 
parent.hasAttribute(name): + return parent.getAttribute(name) + + node = self.find_first_child_named(parent, name) + if node: + return self.extract_text(node) + + return None + + def default(self, datastring): + return {'body': self._from_xml(datastring)} + + +class MetadataXMLDeserializer(XMLDeserializer): + + def extract_metadata(self, metadata_node): + """Marshal the metadata attribute of a parsed request.""" + metadata = {} + if metadata_node is not None: + for meta_node in self.find_children_named(metadata_node, "meta"): + key = meta_node.getAttribute("key") + metadata[key] = self.extract_text(meta_node) + return metadata + + +class DictSerializer(ActionDispatcher): + """Default request body serialization.""" + + def serialize(self, data, action='default'): + return self.dispatch(data, action=action) + + def default(self, data): + return "" + + +class JSONDictSerializer(DictSerializer): + """Default JSON request body serialization.""" + + def default(self, data): + return jsonutils.dumps(data) + + +class XMLDictSerializer(DictSerializer): + + def __init__(self, metadata=None, xmlns=None): + """:param metadata: information needed to deserialize xml into + a dictionary. + :param xmlns: XML namespace to include with serialized xml + """ + super(XMLDictSerializer, self).__init__() + self.metadata = metadata or {} + self.xmlns = xmlns + + def default(self, data): + # We expect data to contain a single key which is the XML root. + root_key = data.keys()[0] + doc = minidom.Document() + node = self._to_xml_node(doc, self.metadata, root_key, data[root_key]) + + return self.to_xml_string(node) + + def to_xml_string(self, node, has_atom=False): + self._add_xmlns(node, has_atom) + return node.toxml('UTF-8') + + #NOTE (ameade): the has_atom should be removed after all of the + # xml serializers and view builders have been updated to the current + # spec that required all responses include the xmlns:atom, the has_atom + # flag is to prevent current tests from breaking + def _add_xmlns(self, node, has_atom=False): + if self.xmlns is not None: + node.setAttribute('xmlns', self.xmlns) + if has_atom: + node.setAttribute('xmlns:atom', "http://www.w3.org/2005/Atom") + + def _to_xml_node(self, doc, metadata, nodename, data): + """Recursive method to convert data members to XML nodes.""" + result = doc.createElement(nodename) + + # Set the xml namespace if one is specified + # TODO(justinsb): We could also use prefixes on the keys + xmlns = metadata.get('xmlns', None) + if xmlns: + result.setAttribute('xmlns', xmlns) + + #TODO(bcwaldon): accomplish this without a type-check + if isinstance(data, list): + collections = metadata.get('list_collections', {}) + if nodename in collections: + metadata = collections[nodename] + for item in data: + node = doc.createElement(metadata['item_name']) + node.setAttribute(metadata['item_key'], str(item)) + result.appendChild(node) + return result + singular = metadata.get('plurals', {}).get(nodename, None) + if singular is None: + if nodename.endswith('s'): + singular = nodename[:-1] + else: + singular = 'item' + for item in data: + node = self._to_xml_node(doc, metadata, singular, item) + result.appendChild(node) + #TODO(bcwaldon): accomplish this without a type-check + elif isinstance(data, dict): + collections = metadata.get('dict_collections', {}) + if nodename in collections: + metadata = collections[nodename] + for k, v in data.items(): + node = doc.createElement(metadata['item_name']) + node.setAttribute(metadata['item_key'], str(k)) + text = 
doc.createTextNode(str(v)) + node.appendChild(text) + result.appendChild(node) + return result + attrs = metadata.get('attributes', {}).get(nodename, {}) + for k, v in data.items(): + if k in attrs: + result.setAttribute(k, str(v)) + else: + if k == "deleted": + v = str(bool(v)) + node = self._to_xml_node(doc, metadata, k, v) + result.appendChild(node) + else: + # Type is atom + node = doc.createTextNode(str(data)) + result.appendChild(node) + return result + + def _create_link_nodes(self, xml_doc, links): + link_nodes = [] + for link in links: + link_node = xml_doc.createElement('atom:link') + link_node.setAttribute('rel', link['rel']) + link_node.setAttribute('href', link['href']) + if 'type' in link: + link_node.setAttribute('type', link['type']) + link_nodes.append(link_node) + return link_nodes + + def _to_xml(self, root): + """Convert the xml object to an xml string.""" + return etree.tostring(root, encoding='UTF-8', xml_declaration=True) + + +def serializers(**serializers): + """Attaches serializers to a method. + + This decorator associates a dictionary of serializers with a + method. Note that the function attributes are directly + manipulated; the method is not wrapped. + """ + + def decorator(func): + if not hasattr(func, 'wsgi_serializers'): + func.wsgi_serializers = {} + func.wsgi_serializers.update(serializers) + return func + return decorator + + +def deserializers(**deserializers): + """Attaches deserializers to a method. + + This decorator associates a dictionary of deserializers with a + method. Note that the function attributes are directly + manipulated; the method is not wrapped. + """ + + def decorator(func): + if not hasattr(func, 'wsgi_deserializers'): + func.wsgi_deserializers = {} + func.wsgi_deserializers.update(deserializers) + return func + return decorator + + +def response(code): + """Attaches response code to a method. + + This decorator associates a response code with a method. Note + that the function attributes are directly manipulated; the method + is not wrapped. + """ + + def decorator(func): + func.wsgi_code = code + return func + return decorator + + +class ResponseObject(object): + """Bundles a response object with appropriate serializers. + + Object that app methods may return in order to bind alternate + serializers with a response object to be serialized. Its use is + optional. + """ + + def __init__(self, obj, code=None, headers=None, **serializers): + """Binds serializers with an object. + + Takes keyword arguments akin to the @serializer() decorator + for specifying serializers. Serializers specified will be + given preference over default serializers or method-specific + serializers on return. + """ + + self.obj = obj + self.serializers = serializers + self._default_code = 200 + self._code = code + self._headers = headers or {} + self.serializer = None + self.media_type = None + + def __getitem__(self, key): + """Retrieves a header with the given name.""" + + return self._headers[key.lower()] + + def __setitem__(self, key, value): + """Sets a header with the given name to the given value.""" + + self._headers[key.lower()] = value + + def __delitem__(self, key): + """Deletes the header with the given name.""" + + del self._headers[key.lower()] + + def _bind_method_serializers(self, meth_serializers): + """Binds method serializers with the response object. + + Binds the method serializers with the response object. + Serializers specified to the constructor will take precedence + over serializers specified to this method. 
+ + :param meth_serializers: A dictionary with keys mapping to + response types and values containing + serializer objects. + """ + + # We can't use update because that would be the wrong + # precedence + for mtype, serializer in meth_serializers.items(): + self.serializers.setdefault(mtype, serializer) + + def get_serializer(self, content_type, default_serializers=None): + """Returns the serializer for the wrapped object. + + Returns the serializer for the wrapped object subject to the + indicated content type. If no serializer matching the content + type is attached, an appropriate serializer drawn from the + default serializers will be used. If no appropriate + serializer is available, raises InvalidContentType. + """ + + default_serializers = default_serializers or {} + + try: + mtype = _MEDIA_TYPE_MAP.get(content_type, content_type) + if mtype in self.serializers: + return mtype, self.serializers[mtype] + else: + return mtype, default_serializers[mtype] + except (KeyError, TypeError): + raise exception.InvalidContentType(content_type=content_type) + + def preserialize(self, content_type, default_serializers=None): + """Prepares the serializer that will be used to serialize. + + Determines the serializer that will be used and prepares an + instance of it for later call. This allows the serializer to + be accessed by extensions for, e.g., template extension. + """ + + mtype, serializer = self.get_serializer(content_type, + default_serializers) + self.media_type = mtype + self.serializer = serializer() + + def attach(self, **kwargs): + """Attach slave templates to serializers.""" + + if self.media_type in kwargs: + self.serializer.attach(kwargs[self.media_type]) + + def serialize(self, request, content_type, default_serializers=None): + """Serializes the wrapped object. + + Utility method for serializing the wrapped object. Returns a + webob.Response object. + """ + + if self.serializer: + serializer = self.serializer + else: + _mtype, _serializer = self.get_serializer(content_type, + default_serializers) + serializer = _serializer() + + response = webob.Response() + response.status_int = self.code + for hdr, value in self._headers.items(): + response.headers[hdr] = utils.utf8(str(value)) + response.headers['Content-Type'] = utils.utf8(content_type) + if self.obj is not None: + response.body = serializer.serialize(self.obj) + + return response + + @property + def code(self): + """Retrieve the response status.""" + + return self._code or self._default_code + + @property + def headers(self): + """Retrieve the headers.""" + + return self._headers.copy() + + +def action_peek_json(body): + """Determine action to invoke.""" + + try: + decoded = jsonutils.loads(body) + except ValueError: + msg = _("cannot understand JSON") + raise exception.MalformedRequestBody(reason=msg) + + # Make sure there's exactly one key... + if len(decoded) != 1: + msg = _("too many body keys") + raise exception.MalformedRequestBody(reason=msg) + + # Return the action and the decoded body... + return decoded.keys()[0] + + +def action_peek_xml(body): + """Determine action to invoke.""" + + dom = xmlutil.safe_minidom_parse_string(body) + action_node = dom.childNodes[0] + + return action_node.tagName + + +class ResourceExceptionHandler(object): + """Context manager to handle Resource exceptions. + + Used when processing exceptions generated by API implementation + methods (or their extensions). Converts most exceptions to Fault + exceptions, with the appropriate logging. 
+ """ + + def __enter__(self): + return None + + def __exit__(self, ex_type, ex_value, ex_traceback): + if not ex_value: + return True + + if isinstance(ex_value, exception.NotAuthorized): + raise Fault(webob.exc.HTTPForbidden( + explanation=ex_value.format_message())) + elif isinstance(ex_value, exception.Invalid): + raise Fault(exception.ConvertedException( + code=ex_value.code, + explanation=ex_value.format_message())) + + # Under python 2.6, TypeError's exception value is actually a string, + # so test # here via ex_type instead: + # http://bugs.python.org/issue7853 + elif issubclass(ex_type, TypeError): + exc_info = (ex_type, ex_value, ex_traceback) + LOG.error(_('Exception handling resource: %s') % ex_value, + exc_info=exc_info) + raise Fault(webob.exc.HTTPBadRequest()) + elif isinstance(ex_value, Fault): + LOG.info(_("Fault thrown: %s"), unicode(ex_value)) + raise ex_value + elif isinstance(ex_value, webob.exc.HTTPException): + LOG.info(_("HTTP exception thrown: %s"), unicode(ex_value)) + raise Fault(ex_value) + + # We didn't handle the exception + return False + + +class Resource(wsgi.Application): + """WSGI app that handles (de)serialization and controller dispatch. + + WSGI app that reads routing information supplied by RoutesMiddleware + and calls the requested action method upon its controller. All + controller action methods must accept a 'req' argument, which is the + incoming wsgi.Request. If the operation is a PUT or POST, the controller + method must also accept a 'body' argument (the deserialized request body). + They may raise a webob.exc exception or return a dict, which will be + serialized by requested content type. + + Exceptions derived from webob.exc.HTTPException will be automatically + wrapped in Fault() to provide API friendly error responses. + + """ + + def __init__(self, controller, action_peek=None, inherits=None, + **deserializers): + """:param controller: object that implement methods created by routes + lib + :param action_peek: dictionary of routines for peeking into an + action request body to determine the + desired action + :param inherits: another resource object that this resource should + inherit extensions from. Any action extensions that + are applied to the parent resource will also apply + to this resource. + """ + + self.controller = controller + + default_deserializers = dict(xml=XMLDeserializer, + json=JSONDeserializer) + default_deserializers.update(deserializers) + + self.default_deserializers = default_deserializers + self.default_serializers = dict(xml=XMLDictSerializer, + json=JSONDictSerializer) + + self.action_peek = dict(xml=action_peek_xml, + json=action_peek_json) + self.action_peek.update(action_peek or {}) + + # Copy over the actions dictionary + self.wsgi_actions = {} + if controller: + self.register_actions(controller) + + # Save a mapping of extensions + self.wsgi_extensions = {} + self.wsgi_action_extensions = {} + self.inherits = inherits + + def register_actions(self, controller): + """Registers controller actions with this resource.""" + + actions = getattr(controller, 'wsgi_actions', {}) + for key, method_name in actions.items(): + self.wsgi_actions[key] = getattr(controller, method_name) + + def register_extensions(self, controller): + """Registers controller extensions with this resource.""" + + extensions = getattr(controller, 'wsgi_extensions', []) + for method_name, action_name in extensions: + # Look up the extending method + extension = getattr(controller, method_name) + + if action_name: + # Extending an action... 
+ if action_name not in self.wsgi_action_extensions: + self.wsgi_action_extensions[action_name] = [] + self.wsgi_action_extensions[action_name].append(extension) + else: + # Extending a regular method + if method_name not in self.wsgi_extensions: + self.wsgi_extensions[method_name] = [] + self.wsgi_extensions[method_name].append(extension) + + def get_action_args(self, request_environment): + """Parse dictionary created by routes library.""" + + # NOTE(Vek): Check for get_action_args() override in the + # controller + if hasattr(self.controller, 'get_action_args'): + return self.controller.get_action_args(request_environment) + + try: + args = request_environment['wsgiorg.routing_args'][1].copy() + except (KeyError, IndexError, AttributeError): + return {} + + try: + del args['controller'] + except KeyError: + pass + + try: + del args['format'] + except KeyError: + pass + + return args + + def get_body(self, request): + try: + content_type = request.get_content_type() + except exception.InvalidContentType: + LOG.debug(_("Unrecognized Content-Type provided in request")) + return None, '' + + return content_type, request.body + + def deserialize(self, meth, content_type, body): + meth_deserializers = getattr(meth, 'wsgi_deserializers', {}) + try: + mtype = _MEDIA_TYPE_MAP.get(content_type, content_type) + if mtype in meth_deserializers: + deserializer = meth_deserializers[mtype] + else: + deserializer = self.default_deserializers[mtype] + except (KeyError, TypeError): + raise exception.InvalidContentType(content_type=content_type) + + if (hasattr(deserializer, 'want_controller') + and deserializer.want_controller): + return deserializer(self.controller).deserialize(body) + else: + return deserializer().deserialize(body) + + def pre_process_extensions(self, extensions, request, action_args): + # List of callables for post-processing extensions + post = [] + + for ext in extensions: + if inspect.isgeneratorfunction(ext): + response = None + + # If it's a generator function, the part before the + # yield is the preprocessing stage + try: + with ResourceExceptionHandler(): + gen = ext(req=request, **action_args) + response = gen.next() + except Fault as ex: + response = ex + + # We had a response... + if response: + return response, [] + + # No response, queue up generator for post-processing + post.append(gen) + else: + # Regular functions only perform post-processing + post.append(ext) + + # Run post-processing in the reverse order + return None, reversed(post) + + def post_process_extensions(self, extensions, resp_obj, request, + action_args): + for ext in extensions: + response = None + if inspect.isgenerator(ext): + # If it's a generator, run the second half of + # processing + try: + with ResourceExceptionHandler(): + response = ext.send(resp_obj) + except StopIteration: + # Normal exit of generator + continue + except Fault as ex: + response = ex + else: + # Regular functions get post-processing... + try: + with ResourceExceptionHandler(): + response = ext(req=request, resp_obj=resp_obj, + **action_args) + except Fault as ex: + response = ex + + # We had a response... 
+ if response: + return response + + return None + + def _should_have_body(self, request): + return request.method in _METHODS_WITH_BODY + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, request): + """WSGI method that controls (de)serialization and method dispatch.""" + + # Identify the action, its arguments, and the requested + # content type + action_args = self.get_action_args(request.environ) + action = action_args.pop('action', None) + content_type, body = self.get_body(request) + accept = request.best_match_content_type() + + # NOTE(Vek): Splitting the function up this way allows for + # auditing by external tools that wrap the existing + # function. If we try to audit __call__(), we can + # run into troubles due to the @webob.dec.wsgify() + # decorator. + return self._process_stack(request, action, action_args, + content_type, body, accept) + + def _process_stack(self, request, action, action_args, + content_type, body, accept): + """Implement the processing stack.""" + + # Get the implementing method + try: + meth, extensions = self.get_method(request, action, + content_type, body) + except (AttributeError, TypeError): + return Fault(webob.exc.HTTPNotFound()) + except KeyError as ex: + msg = _("There is no such action: %s") % ex.args[0] + return Fault(webob.exc.HTTPBadRequest(explanation=msg)) + except exception.MalformedRequestBody: + msg = _("Malformed request body") + return Fault(webob.exc.HTTPBadRequest(explanation=msg)) + + if body: + msg = _("Action: '%(action)s', body: " + "%(body)s") % {'action': action, + 'body': unicode(body, 'utf-8')} + LOG.debug(logging.mask_password(msg)) + LOG.debug(_("Calling method '%(meth)s' (Content-type='%(ctype)s', " + "Accept='%(accept)s')"), + {'meth': str(meth), + 'ctype': content_type, + 'accept': accept}) + + # Now, deserialize the request body... 
+ try: + contents = {} + if self._should_have_body(request): + #allow empty body with PUT and POST + if request.content_length == 0: + contents = {'body': None} + else: + contents = self.deserialize(meth, content_type, body) + except exception.InvalidContentType: + msg = _("Unsupported Content-Type") + return Fault(webob.exc.HTTPBadRequest(explanation=msg)) + except exception.MalformedRequestBody: + msg = _("Malformed request body") + return Fault(webob.exc.HTTPBadRequest(explanation=msg)) + + # Update the action args + action_args.update(contents) + + project_id = action_args.pop("project_id", None) + context = request.environ.get('rack.context') + if (context and project_id and (project_id != context.project_id)): + msg = _("Malformed request URL: URL's project_id '%(project_id)s'" + " doesn't match Context's project_id" + " '%(context_project_id)s'") % \ + {'project_id': project_id, + 'context_project_id': context.project_id} + return Fault(webob.exc.HTTPBadRequest(explanation=msg)) + + # Run pre-processing extensions + response, post = self.pre_process_extensions(extensions, + request, action_args) + + if not response: + try: + with ResourceExceptionHandler(): + action_result = self.dispatch(meth, request, action_args) + except Fault as ex: + response = ex + + if not response: + # No exceptions; convert action_result into a + # ResponseObject + resp_obj = None + if type(action_result) is dict or action_result is None: + resp_obj = ResponseObject(action_result) + elif isinstance(action_result, ResponseObject): + resp_obj = action_result + else: + response = action_result + + # Run post-processing extensions + if resp_obj: + # Do a preserialize to set up the response object + serializers = getattr(meth, 'wsgi_serializers', {}) + resp_obj._bind_method_serializers(serializers) + if hasattr(meth, 'wsgi_code'): + resp_obj._default_code = meth.wsgi_code + resp_obj.preserialize(accept, self.default_serializers) + + # Process post-processing extensions + response = self.post_process_extensions(post, resp_obj, + request, action_args) + + if resp_obj and not response: + response = resp_obj.serialize(request, accept, + self.default_serializers) + + if hasattr(response, 'headers'): + if context: + response.headers.add('x-compute-request-id', + context.request_id) + + for hdr, val in response.headers.items(): + # Headers must be utf-8 strings + response.headers[hdr] = utils.utf8(str(val)) + + return response + + def get_method(self, request, action, content_type, body): + meth, extensions = self._get_method(request, + action, + content_type, + body) + if self.inherits: + _meth, parent_ext = self.inherits.get_method(request, + action, + content_type, + body) + extensions.extend(parent_ext) + return meth, extensions + + def _get_method(self, request, action, content_type, body): + """Look up the action-specific method and its extensions.""" + + # Look up the method + try: + if not self.controller: + meth = getattr(self, action) + else: + meth = getattr(self.controller, action) + except AttributeError: + if (not self.wsgi_actions or + action not in _ROUTES_METHODS + ['action']): + # Propagate the error + raise + else: + return meth, self.wsgi_extensions.get(action, []) + + if action == 'action': + # OK, it's an action; figure out which action... 
+ mtype = _MEDIA_TYPE_MAP.get(content_type) + action_name = self.action_peek[mtype](body) + else: + action_name = action + + # Look up the action method + return (self.wsgi_actions[action_name], + self.wsgi_action_extensions.get(action_name, [])) + + def dispatch(self, method, request, action_args): + """Dispatch a call to the action-specific method.""" + + return method(req=request, **action_args) + + +def action(name): + """Mark a function as an action. + + The given name will be taken as the action key in the body. + + This is also overloaded to allow extensions to provide + non-extending definitions of create and delete operations. + """ + + def decorator(func): + func.wsgi_action = name + return func + return decorator + + +def extends(*args, **kwargs): + """Indicate a function extends an operation. + + Can be used as either:: + + @extends + def index(...): + pass + + or as:: + + @extends(action='resize') + def _action_resize(...): + pass + """ + + def decorator(func): + # Store enough information to find what we're extending + func.wsgi_extends = (func.__name__, kwargs.get('action')) + return func + + # If we have positional arguments, call the decorator + if args: + return decorator(*args) + + # OK, return the decorator instead + return decorator + + +class ControllerMetaclass(type): + """Controller metaclass. + + This metaclass automates the task of assembling a dictionary + mapping action keys to method names. + """ + + def __new__(mcs, name, bases, cls_dict): + """Adds the wsgi_actions dictionary to the class.""" + + # Find all actions + actions = {} + extensions = [] + # start with wsgi actions from base classes + for base in bases: + actions.update(getattr(base, 'wsgi_actions', {})) + for key, value in cls_dict.items(): + if not callable(value): + continue + if getattr(value, 'wsgi_action', None): + actions[value.wsgi_action] = key + elif getattr(value, 'wsgi_extends', None): + extensions.append(value.wsgi_extends) + + # Add the actions and extensions to the class dict + cls_dict['wsgi_actions'] = actions + cls_dict['wsgi_extensions'] = extensions + + return super(ControllerMetaclass, mcs).__new__(mcs, name, bases, + cls_dict) + + +@six.add_metaclass(ControllerMetaclass) +class Controller(object): + """Default controller.""" + + _view_builder_class = None + + def __init__(self, view_builder=None): + """Initialize controller with a view builder instance.""" + if view_builder: + self._view_builder = view_builder + elif self._view_builder_class: + self._view_builder = self._view_builder_class() + else: + self._view_builder = None + + @staticmethod + def is_valid_body(body, entity_name): + if not (body and entity_name in body): + return False + + def is_dict(d): + try: + d.get(None) + return True + except AttributeError: + return False + + if not is_dict(body[entity_name]): + return False + + return True + + +class Fault(webob.exc.HTTPException): + """Wrap webob.exc.HTTPException to provide API friendly response.""" + + _fault_names = { + 400: "badRequest", + 401: "unauthorized", + 403: "forbidden", + 404: "itemNotFound", + 405: "badMethod", + 409: "conflictingRequest", + 413: "overLimit", + 415: "badMediaType", + 429: "overLimit", + 501: "notImplemented", + 503: "serviceUnavailable"} + + def __init__(self, exception): + """Create a Fault for the given webob.exc.exception.""" + self.wrapped_exc = exception + for key, value in self.wrapped_exc.headers.items(): + self.wrapped_exc.headers[key] = str(value) + self.status_int = exception.status_int + + 
@webob.dec.wsgify(RequestClass=Request) + def __call__(self, req): + """Generate a WSGI response based on the exception passed to ctor.""" + + user_locale = req.best_match_language() + # Replace the body with fault details. + code = self.wrapped_exc.status_int + fault_name = self._fault_names.get(code, "computeFault") + explanation = self.wrapped_exc.explanation + LOG.debug(_("Returning %(code)s to user: %(explanation)s"), + {'code': code, 'explanation': explanation}) + + explanation = gettextutils.translate(explanation, + user_locale) + fault_data = { + fault_name: { + 'code': code, + 'message': explanation}} + if code == 413 or code == 429: + retry = self.wrapped_exc.headers.get('Retry-After', None) + if retry: + fault_data[fault_name]['retryAfter'] = retry + + # 'code' is an attribute on the fault tag itself + metadata = {'attributes': {fault_name: 'code'}} + + xml_serializer = XMLDictSerializer(metadata, XMLNS_V11) + + content_type = req.best_match_content_type() + serializer = { + 'application/xml': xml_serializer, + 'application/json': JSONDictSerializer(), + }[content_type] + + self.wrapped_exc.body = serializer.serialize(fault_data) + self.wrapped_exc.content_type = content_type + _set_request_id_header(req, self.wrapped_exc.headers) + + return self.wrapped_exc + + def __str__(self): + return self.wrapped_exc.__str__() + + +class RateLimitFault(webob.exc.HTTPException): + """Rate-limited request response.""" + + def __init__(self, message, details, retry_time): + """Initialize new `RateLimitFault` with relevant information.""" + hdrs = RateLimitFault._retry_after(retry_time) + self.wrapped_exc = webob.exc.HTTPTooManyRequests(headers=hdrs) + self.content = { + "overLimit": { + "code": self.wrapped_exc.status_int, + "message": message, + "details": details, + "retryAfter": hdrs['Retry-After'], + }, + } + + @staticmethod + def _retry_after(retry_time): + delay = int(math.ceil(retry_time - time.time())) + retry_after = delay if delay > 0 else 0 + headers = {'Retry-After': '%d' % retry_after} + return headers + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, request): + """Return the wrapped exception with a serialized body conforming + to our error format. + """ + user_locale = request.best_match_language() + content_type = request.best_match_content_type() + metadata = {"attributes": {"overLimit": ["code", "retryAfter"]}} + + self.content['overLimit']['message'] = \ + gettextutils.translate( + self.content['overLimit']['message'], + user_locale) + self.content['overLimit']['details'] = \ + gettextutils.translate( + self.content['overLimit']['details'], + user_locale) + + xml_serializer = XMLDictSerializer(metadata, XMLNS_V11) + serializer = { + 'application/xml': xml_serializer, + 'application/json': JSONDictSerializer(), + }[content_type] + + content = serializer.serialize(self.content) + self.wrapped_exc.body = content + self.wrapped_exc.content_type = content_type + + return self.wrapped_exc + + +def _set_request_id_header(req, headers): + context = req.environ.get('rack.context') + if context: + headers['x-compute-request-id'] = context.request_id diff --git a/rack/api/xmlutil.py b/rack/api/xmlutil.py new file mode 100644 index 0000000..7704a64 --- /dev/null +++ b/rack/api/xmlutil.py @@ -0,0 +1,993 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os.path + +from lxml import etree +import six +from xml.dom import minidom +from xml.parsers import expat +from xml import sax +from xml.sax import expatreader + +from rack import exception +from rack.openstack.common.gettextutils import _ +from rack import utils + + +XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0' +XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1' +XMLNS_COMMON_V10 = 'http://docs.openstack.org/common/api/v1.0' +XMLNS_ATOM = 'http://www.w3.org/2005/Atom' + + +def validate_schema(xml, schema_name, version='v1.1'): + if isinstance(xml, str): + xml = etree.fromstring(xml) + base_path = 'rack/api/openstack/compute/schemas/' + if schema_name not in ('atom', 'atom-link'): + base_path += '%s/' % version + schema_path = os.path.join(utils.rackdir(), + '%s%s.rng' % (base_path, schema_name)) + schema_doc = etree.parse(schema_path) + relaxng = etree.RelaxNG(schema_doc) + relaxng.assertValid(xml) + + +class Selector(object): + """Selects datum to operate on from an object.""" + + def __init__(self, *chain): + """Initialize the selector. + + Each argument is a subsequent index into the object. + """ + + self.chain = chain + + def __repr__(self): + """Return a representation of the selector.""" + + return "Selector" + repr(self.chain) + + def __call__(self, obj, do_raise=False): + """Select a datum to operate on. + + Selects the relevant datum within the object. + + :param obj: The object from which to select the object. + :param do_raise: If False (the default), return None if the + indexed datum does not exist. Otherwise, + raise a KeyError. + """ + + # Walk the selector list + for elem in self.chain: + # If it's callable, call it + if callable(elem): + obj = elem(obj) + else: + if obj == '': + return '' + # Use indexing + try: + obj = obj[elem] + except (KeyError, IndexError): + # No sense going any further + if do_raise: + # Convert to a KeyError, for consistency + raise KeyError(elem) + return None + + # Return the finally-selected object + return obj + + +def get_items(obj): + """Get items in obj.""" + + return list(obj.items()) + + +def get_items_without_dict(obj): + """Get items in obj but omit any items containing a dict.""" + + obj_list = list(obj.items()) + for item in obj_list: + if isinstance(list(item)[1], dict): + obj_list.remove(item) + return obj_list + + +class EmptyStringSelector(Selector): + """Returns the empty string if Selector would return None.""" + def __call__(self, obj, do_raise=False): + """Returns empty string if the selected value does not exist.""" + + try: + return super(EmptyStringSelector, self).__call__(obj, True) + except KeyError: + return "" + + +class ConstantSelector(object): + """Returns a constant.""" + + def __init__(self, value): + """Initialize the selector. + + :param value: The value to return. + """ + + self.value = value + + def __repr__(self): + """Return a representation of the selector.""" + + return repr(self.value) + + def __call__(self, _obj, _do_raise=False): + """Select a datum to operate on. + + Returns a constant value. Compatible with + Selector.__call__(). 
+ """ + + return self.value + + +class TemplateElement(object): + """Represent an element in the template.""" + + def __init__(self, tag, attrib=None, selector=None, subselector=None, + colon_ns=False, **extra): + """Initialize an element. + + Initializes an element in the template. Keyword arguments + specify attributes to be set on the element; values must be + callables. See TemplateElement.set() for more information. + + :param tag: The name of the tag to create. + :param attrib: An optional dictionary of element attributes. + :param selector: An optional callable taking an object and + optional boolean do_raise indicator and + returning the object bound to the element. + :param subselector: An optional callable taking an object and + optional boolean do_raise indicator and + returning the object bound to the element. + This is used to further refine the datum + object returned by selector in the event + that it is a list of objects. + :colon_ns: An optional flag indicating whether to support k:v + type tagname, if True the k:v type tagname will + be supported by adding the k into the namespace. + """ + + # Convert selector into a Selector + if selector is None: + selector = Selector() + elif not callable(selector): + selector = Selector(selector) + + # Convert subselector into a Selector + if subselector is not None and not callable(subselector): + subselector = Selector(subselector) + + self.tag = tag + self.selector = selector + self.subselector = subselector + self.attrib = {} + self._text = None + self._children = [] + self._childmap = {} + self.colon_ns = colon_ns + + # Run the incoming attributes through set() so that they + # become selectorized + if not attrib: + attrib = {} + attrib.update(extra) + for k, v in attrib.items(): + self.set(k, v) + + def __repr__(self): + """Return a representation of the template element.""" + + return ('<%s.%s %r at %#x>' % + (self.__class__.__module__, self.__class__.__name__, + self.tag, id(self))) + + def __len__(self): + """Return the number of child elements.""" + + return len(self._children) + + def __contains__(self, key): + """Determine whether a child node named by key exists.""" + + return key in self._childmap + + def __getitem__(self, idx): + """Retrieve a child node by index or name.""" + + if isinstance(idx, six.string_types): + # Allow access by node name + return self._childmap[idx] + else: + return self._children[idx] + + def append(self, elem): + """Append a child to the element.""" + + # Unwrap templates... + elem = elem.unwrap() + + # Avoid duplications + if elem.tag in self._childmap: + raise KeyError(elem.tag) + + self._children.append(elem) + self._childmap[elem.tag] = elem + + def extend(self, elems): + """Append children to the element.""" + + # Pre-evaluate the elements + elemmap = {} + elemlist = [] + for elem in elems: + # Unwrap templates... + elem = elem.unwrap() + + # Avoid duplications + if elem.tag in self._childmap or elem.tag in elemmap: + raise KeyError(elem.tag) + + elemmap[elem.tag] = elem + elemlist.append(elem) + + # Update the children + self._children.extend(elemlist) + self._childmap.update(elemmap) + + def insert(self, idx, elem): + """Insert a child element at the given index.""" + + # Unwrap templates... + elem = elem.unwrap() + + # Avoid duplications + if elem.tag in self._childmap: + raise KeyError(elem.tag) + + self._children.insert(idx, elem) + self._childmap[elem.tag] = elem + + def remove(self, elem): + """Remove a child element.""" + + # Unwrap templates... 
+ elem = elem.unwrap() + + # Check if element exists + if elem.tag not in self._childmap or self._childmap[elem.tag] != elem: + raise ValueError(_('element is not a child')) + + self._children.remove(elem) + del self._childmap[elem.tag] + + def get(self, key): + """Get an attribute. + + Returns a callable which performs datum selection. + + :param key: The name of the attribute to get. + """ + + return self.attrib[key] + + def set(self, key, value=None): + """Set an attribute. + + :param key: The name of the attribute to set. + + :param value: A callable taking an object and optional boolean + do_raise indicator and returning the datum bound + to the attribute. If None, a Selector() will be + constructed from the key. If a string, a + Selector() will be constructed from the string. + """ + + # Convert value to a selector + if value is None: + value = Selector(key) + elif not callable(value): + value = Selector(value) + + self.attrib[key] = value + + def keys(self): + """Return the attribute names.""" + + return self.attrib.keys() + + def items(self): + """Return the attribute names and values.""" + + return self.attrib.items() + + def unwrap(self): + """Unwraps a template to return a template element.""" + + # We are a template element + return self + + def wrap(self): + """Wraps a template element to return a template.""" + + # Wrap in a basic Template + return Template(self) + + def apply(self, elem, obj): + """Apply text and attributes to an etree.Element. + + Applies the text and attribute instructions in the template + element to an etree.Element instance. + + :param elem: An etree.Element instance. + :param obj: The base object associated with this template + element. + """ + + # Start with the text... + if self.text is not None: + elem.text = unicode(self.text(obj)) + + # Now set up all the attributes... + for key, value in self.attrib.items(): + try: + elem.set(key, unicode(value(obj, True))) + except KeyError: + # Attribute has no value, so don't include it + pass + + def _render(self, parent, datum, patches, nsmap): + """Internal rendering. + + Renders the template node into an etree.Element object. + Returns the etree.Element object. + + :param parent: The parent etree.Element instance. + :param datum: The datum associated with this template element. + :param patches: A list of other template elements that must + also be applied. + :param nsmap: An optional namespace dictionary to be + associated with the etree.Element instance. + """ + + # Allocate a node + if callable(self.tag): + tagname = self.tag(datum) + else: + tagname = self.tag + + if self.colon_ns: + if ':' in tagname: + if nsmap is None: + nsmap = {} + colon_key, colon_name = tagname.split(':') + nsmap[colon_key] = colon_key + tagname = '{%s}%s' % (colon_key, colon_name) + + elem = etree.Element(tagname, nsmap=nsmap) + + # If we have a parent, append the node to the parent + if parent is not None: + parent.append(elem) + + # If the datum is None, do nothing else + if datum is None: + return elem + + # Apply this template element to the element + self.apply(elem, datum) + + # Additionally, apply the patches + for patch in patches: + patch.apply(elem, datum) + + # We have fully rendered the element; return it + return elem + + def render(self, parent, obj, patches=[], nsmap=None): + """Render an object. + + Renders an object against this template node. Returns a list + of two-item tuples, where the first item is an etree.Element + instance and the second item is the datum associated with that + instance. 
+
+        :param parent: The parent for the etree.Element instances.
+        :param obj: The object to render this template element
+                    against.
+        :param patches: A list of other template elements to apply
+                        when rendering this template element.
+        :param nsmap: An optional namespace dictionary to attach to
+                      the etree.Element instances.
+        """
+
+        # First, get the datum we're rendering
+        data = None if obj is None else self.selector(obj)
+
+        # Check if we should render at all
+        if not self.will_render(data):
+            return []
+        elif data is None:
+            return [(self._render(parent, None, patches, nsmap), None)]
+
+        # Make the data into a list if it isn't already
+        if not isinstance(data, list):
+            data = [data]
+        elif parent is None:
+            raise ValueError(_('root element selecting a list'))
+
+        # Render all the elements
+        elems = []
+        for datum in data:
+            if self.subselector is not None:
+                datum = self.subselector(datum)
+            elems.append((self._render(parent, datum, patches, nsmap), datum))
+
+        # Return all the elements rendered, as well as the
+        # corresponding datum for the next step down the tree
+        return elems
+
+    def will_render(self, datum):
+        """Hook method.
+
+        An overridable hook method to determine whether this template
+        element will be rendered at all. By default, returns False
+        (inhibiting rendering) if the datum is None.
+
+        :param datum: The datum associated with this template element.
+        """
+
+        # Don't render if datum is None
+        return datum is not None
+
+    def _text_get(self):
+        """Template element text.
+
+        Either None or a callable taking an object and optional
+        boolean do_raise indicator and returning the datum bound to
+        the text of the template element.
+        """
+
+        return self._text
+
+    def _text_set(self, value):
+        # Convert value to a selector
+        if value is not None and not callable(value):
+            value = Selector(value)
+
+        self._text = value
+
+    def _text_del(self):
+        self._text = None
+
+    text = property(_text_get, _text_set, _text_del)
+
+    def tree(self):
+        """Return string representation of the template tree.
+
+        Returns a representation of the template rooted at this
+        element as a string, suitable for inclusion in debug logs.
+        """
+
+        # Build the inner contents of the tag...
+        contents = [self.tag, '!selector=%r' % self.selector]
+
+        # Add the text...
+        if self.text is not None:
+            contents.append('!text=%r' % self.text)
+
+        # Add all the other attributes
+        for key, value in self.attrib.items():
+            contents.append('%s=%r' % (key, value))
+
+        # If there are no children, return it as a closed tag
+        if len(self) == 0:
+            return '<%s/>' % ' '.join([str(i) for i in contents])
+
+        # OK, recurse to our children
+        children = [c.tree() for c in self]
+
+        # Return the result
+        return ('<%s>%s</%s>' %
+                (' '.join(contents), ''.join(children), self.tag))
+
+
+def SubTemplateElement(parent, tag, attrib=None, selector=None,
+                       subselector=None, colon_ns=False, **extra):
+    """Create a template element as a child of another.
+
+    Corresponds to the etree.SubElement interface. Parameters are as
+    for TemplateElement, with the addition of the parent.
+    """
+
+    # Convert attributes
+    attrib = attrib or {}
+    attrib.update(extra)
+
+    # Get a TemplateElement
+    elem = TemplateElement(tag, attrib=attrib, selector=selector,
+                           subselector=subselector, colon_ns=colon_ns)
+
+    # Append the parent safely
+    if parent is not None:
+        parent.append(elem)
+
+    return elem
+
+
+class Template(object):
+    """Represent a template."""
+
+    def __init__(self, root, nsmap=None):
+        """Initialize a template.
+ + :param root: The root element of the template. + :param nsmap: An optional namespace dictionary to be + associated with the root element of the + template. + """ + + self.root = root.unwrap() if root is not None else None + self.nsmap = nsmap or {} + self.serialize_options = dict(encoding='UTF-8', xml_declaration=True) + + def _serialize(self, parent, obj, siblings, nsmap=None): + """Internal serialization. + + Recursive routine to build a tree of etree.Element instances + from an object based on the template. Returns the first + etree.Element instance rendered, or None. + + :param parent: The parent etree.Element instance. Can be + None. + :param obj: The object to render. + :param siblings: The TemplateElement instances against which + to render the object. + :param nsmap: An optional namespace dictionary to be + associated with the etree.Element instance + rendered. + """ + + # First step, render the element + elems = siblings[0].render(parent, obj, siblings[1:], nsmap) + + # Now, recurse to all child elements + seen = set() + for idx, sibling in enumerate(siblings): + for child in sibling: + # Have we handled this child already? + if child.tag in seen: + continue + seen.add(child.tag) + + # Determine the child's siblings + nieces = [child] + for sib in siblings[idx + 1:]: + if child.tag in sib: + nieces.append(sib[child.tag]) + + # Now we recurse for every data element + for elem, datum in elems: + self._serialize(elem, datum, nieces) + + # Return the first element; at the top level, this will be the + # root element + if elems: + return elems[0][0] + + def serialize(self, obj, *args, **kwargs): + """Serialize an object. + + Serializes an object against the template. Returns a string + with the serialized XML. Positional and keyword arguments are + passed to etree.tostring(). + + :param obj: The object to serialize. + """ + + elem = self.make_tree(obj) + if elem is None: + return '' + + for k, v in self.serialize_options.items(): + kwargs.setdefault(k, v) + + # Serialize it into XML + return etree.tostring(elem, *args, **kwargs) + + def make_tree(self, obj): + """Create a tree. + + Serializes an object against the template. Returns an Element + node with appropriate children. + + :param obj: The object to serialize. + """ + + # If the template is empty, return the empty string + if self.root is None: + return None + + # Get the siblings and nsmap of the root element + siblings = self._siblings() + nsmap = self._nsmap() + + # Form the element tree + return self._serialize(None, obj, siblings, nsmap) + + def _siblings(self): + """Hook method for computing root siblings. + + An overridable hook method to return the siblings of the root + element. By default, this is the root element itself. + """ + + return [self.root] + + def _nsmap(self): + """Hook method for computing the namespace dictionary. + + An overridable hook method to return the namespace dictionary. + """ + + return self.nsmap.copy() + + def unwrap(self): + """Unwraps a template to return a template element.""" + + # Return the root element + return self.root + + def wrap(self): + """Wraps a template element to return a template.""" + + # We are a template + return self + + def apply(self, master): + """Hook method for determining slave applicability. + + An overridable hook method used to determine if this template + is applicable as a slave to a given master template. + + :param master: The master template to test. + """ + + return True + + def tree(self): + """Return string representation of the template tree. 
+ + Returns a representation of the template as a string, suitable + for inclusion in debug logs. + """ + + return "%r: %s" % (self, self.root.tree()) + + +class MasterTemplate(Template): + """Represent a master template. + + Master templates are versioned derivatives of templates that + additionally allow slave templates to be attached. Slave + templates allow modification of the serialized result without + directly changing the master. + """ + + def __init__(self, root, version, nsmap=None): + """Initialize a master template. + + :param root: The root element of the template. + :param version: The version number of the template. + :param nsmap: An optional namespace dictionary to be + associated with the root element of the + template. + """ + + super(MasterTemplate, self).__init__(root, nsmap) + self.version = version + self.slaves = [] + + def __repr__(self): + """Return string representation of the template.""" + + return ("<%s.%s object version %s at %#x>" % + (self.__class__.__module__, self.__class__.__name__, + self.version, id(self))) + + def _siblings(self): + """Hook method for computing root siblings. + + An overridable hook method to return the siblings of the root + element. This is the root element plus the root elements of + all the slave templates. + """ + + return [self.root] + [slave.root for slave in self.slaves] + + def _nsmap(self): + """Hook method for computing the namespace dictionary. + + An overridable hook method to return the namespace dictionary. + The namespace dictionary is computed by taking the master + template's namespace dictionary and updating it from all the + slave templates. + """ + + nsmap = self.nsmap.copy() + for slave in self.slaves: + nsmap.update(slave._nsmap()) + return nsmap + + def attach(self, *slaves): + """Attach one or more slave templates. + + Attaches one or more slave templates to the master template. + Slave templates must have a root element with the same tag as + the master template. The slave template's apply() method will + be called to determine if the slave should be applied to this + master; if it returns False, that slave will be skipped. + (This allows filtering of slaves based on the version of the + master template.) + """ + + slave_list = [] + for slave in slaves: + slave = slave.wrap() + + # Make sure we have a tree match + if slave.root.tag != self.root.tag: + msg = _("Template tree mismatch; adding slave %(slavetag)s to " + "master %(mastertag)s") % {'slavetag': slave.root.tag, + 'mastertag': self.root.tag} + raise ValueError(msg) + + # Make sure slave applies to this template + if not slave.apply(self): + continue + + slave_list.append(slave) + + # Add the slaves + self.slaves.extend(slave_list) + + def copy(self): + """Return a copy of this master template.""" + + # Return a copy of the MasterTemplate + tmp = self.__class__(self.root, self.version, self.nsmap) + tmp.slaves = self.slaves[:] + return tmp + + +class SlaveTemplate(Template): + """Represent a slave template. + + Slave templates are versioned derivatives of templates. Each + slave has a minimum version and optional maximum version of the + master template to which they can be attached. + """ + + def __init__(self, root, min_vers, max_vers=None, nsmap=None): + """Initialize a slave template. + + :param root: The root element of the template. + :param min_vers: The minimum permissible version of the master + template for this slave template to apply. + :param max_vers: An optional upper bound for the master + template version. 
+ :param nsmap: An optional namespace dictionary to be + associated with the root element of the + template. + """ + + super(SlaveTemplate, self).__init__(root, nsmap) + self.min_vers = min_vers + self.max_vers = max_vers + + def __repr__(self): + """Return string representation of the template.""" + + return ("<%s.%s object versions %s-%s at %#x>" % + (self.__class__.__module__, self.__class__.__name__, + self.min_vers, self.max_vers, id(self))) + + def apply(self, master): + """Hook method for determining slave applicability. + + An overridable hook method used to determine if this template + is applicable as a slave to a given master template. This + version requires the master template to have a version number + between min_vers and max_vers. + + :param master: The master template to test. + """ + + # Does the master meet our minimum version requirement? + if master.version < self.min_vers: + return False + + # How about our maximum version requirement? + if self.max_vers is not None and master.version > self.max_vers: + return False + + return True + + +class TemplateBuilder(object): + """Template builder. + + This class exists to allow templates to be lazily built without + having to build them each time they are needed. It must be + subclassed, and the subclass must implement the construct() + method, which must return a Template (or subclass) instance. The + constructor will always return the template returned by + construct(), or, if it has a copy() method, a copy of that + template. + """ + + _tmpl = None + + def __new__(cls, copy=True): + """Construct and return a template. + + :param copy: If True (the default), a copy of the template + will be constructed and returned, if possible. + """ + + # Do we need to construct the template? + if cls._tmpl is None: + tmp = super(TemplateBuilder, cls).__new__(cls) + + # Construct the template + cls._tmpl = tmp.construct() + + # If the template has a copy attribute, return the result of + # calling it + if copy and hasattr(cls._tmpl, 'copy'): + return cls._tmpl.copy() + + # Return the template + return cls._tmpl + + def construct(self): + """Construct a template. + + Called to construct a template instance, which it must return. + Only called once. + """ + + raise NotImplementedError(_("subclasses must implement construct()!")) + + +def make_links(parent, selector=None): + """Attach an Atom element to the parent.""" + + elem = SubTemplateElement(parent, '{%s}link' % XMLNS_ATOM, + selector=selector) + elem.set('rel') + elem.set('type') + elem.set('href') + + # Just for completeness... + return elem + + +def make_flat_dict(name, selector=None, subselector=None, + ns=None, colon_ns=False, root=None, + ignore_sub_dicts=False): + """Utility for simple XML templates that traditionally used + XMLDictSerializer with no metadata. Returns a template element + where the top-level element has the given tag name, and where + sub-elements have tag names derived from the object's keys and + text derived from the object's values. + + :param root: if None, this will create the root. + :param ignore_sub_dicts: If True, ignores any dict objects inside the + object. If False, causes an error if there is a + dict object present. + """ + + # Set up the names we need... 
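+    # (For example, with name='meta' and an object such as
+    # {'meta': {'key': 'value'}} -- illustrative values -- the template
+    # built below serializes to <meta><key>value</key></meta>, matching
+    # the old XMLDictSerializer output mentioned in the docstring.)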
+    if ns is None:
+        elemname = name
+        tagname = Selector(0)
+    else:
+        elemname = '{%s}%s' % (ns, name)
+        tagname = lambda obj, do_raise=False: '{%s}%s' % (ns, obj[0])
+
+    if selector is None:
+        selector = name
+    if not root:
+        # Build the root element
+        root = TemplateElement(elemname, selector=selector,
+                               subselector=subselector, colon_ns=colon_ns)
+    choice = get_items if ignore_sub_dicts is False else get_items_without_dict
+    # Build an element to represent all the keys and values
+    elem = SubTemplateElement(root, tagname, selector=choice,
+                              colon_ns=colon_ns)
+    elem.text = 1
+
+    # Return the template
+    return root
+
+
+class ProtectedExpatParser(expatreader.ExpatParser):
+    """An expat parser which disables DTD's and entities by default."""
+
+    def __init__(self, forbid_dtd=True, forbid_entities=True,
+                 *args, **kwargs):
+        # Python 2.x old style class
+        expatreader.ExpatParser.__init__(self, *args, **kwargs)
+        self.forbid_dtd = forbid_dtd
+        self.forbid_entities = forbid_entities
+
+    def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
+        raise ValueError("Inline DTD forbidden")
+
+    def entity_decl(self, entityName, is_parameter_entity, value, base,
+                    systemId, publicId, notationName):
+        raise ValueError("<!ENTITY> entity declaration forbidden")
+
+    def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
+        # expat 1.2
+        raise ValueError("<!ENTITY> unparsed entity forbidden")
+
+    def external_entity_ref(self, context, base, systemId, publicId):
+        raise ValueError("<!ENTITY> external entity forbidden")
+
+    def notation_decl(self, name, base, sysid, pubid):
+        raise ValueError("<!ENTITY> notation forbidden")
+
+    def reset(self):
+        expatreader.ExpatParser.reset(self)
+        if self.forbid_dtd:
+            self._parser.StartDoctypeDeclHandler = self.start_doctype_decl
+            self._parser.EndDoctypeDeclHandler = None
+        if self.forbid_entities:
+            self._parser.EntityDeclHandler = self.entity_decl
+            self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
+            self._parser.ExternalEntityRefHandler = self.external_entity_ref
+            self._parser.NotationDeclHandler = self.notation_decl
+            try:
+                self._parser.SkippedEntityHandler = None
+            except AttributeError:
+                # some pyexpat versions do not support SkippedEntity
+                pass
+
+
+def safe_minidom_parse_string(xml_string):
+    """Parse an XML string using minidom safely."""
+    try:
+        return minidom.parseString(xml_string, parser=ProtectedExpatParser())
+    except (sax.SAXParseException, ValueError,
+            expat.ExpatError, LookupError) as e:
+        #NOTE(Vijaya Erukala): XML input such as
+        #                      <?xml version="1.0" encoding="TF-8"?>
+        #                      raises LookupError: unknown encoding: TF-8
+        raise exception.MalformedRequestBody(reason=str(e))
diff --git a/rack/baserpc.py b/rack/baserpc.py
new file mode 100644
index 0000000..3499a35
--- /dev/null
+++ b/rack/baserpc.py
@@ -0,0 +1,81 @@
+# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Base RPC client and server common to all services.
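+
+A rough usage sketch (the topic name is illustrative and assumes the RPC
+layer has already been initialized through rack.rpc.init())::
+
+    client = BaseAPI(topic='rack-scheduler')
+    client.ping(context, 'hello')  # the server side echoes the argument back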
+""" + +from oslo.config import cfg +from oslo import messaging + +from rack.openstack.common import jsonutils +from rack import rpc + + +CONF = cfg.CONF +rpcapi_cap_opt = cfg.StrOpt('baseapi', + help='Set a version cap for messages sent to the base api in any ' + 'service') +CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels') + +_NAMESPACE = 'baseapi' + + +class BaseAPI(object): + """Client side of the base rpc API. + + API version history: + + 1.0 - Initial version. + 1.1 - Add get_backdoor_port + """ + + VERSION_ALIASES = { + # baseapi was added in havana + } + + def __init__(self, topic): + super(BaseAPI, self).__init__() + target = messaging.Target(topic=topic, + namespace=_NAMESPACE, + version='1.0') + version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.baseapi, + CONF.upgrade_levels.baseapi) + self.client = rpc.get_client(target, version_cap=version_cap) + + def ping(self, context, arg, timeout=None): + arg_p = jsonutils.to_primitive(arg) + cctxt = self.client.prepare(timeout=timeout) + return cctxt.call(context, 'ping', arg=arg_p) + + def get_backdoor_port(self, context, host): + cctxt = self.client.prepare(server=host, version='1.1') + return cctxt.call(context, 'get_backdoor_port') + + +class BaseRPCAPI(object): + """Server side of the base RPC API.""" + + target = messaging.Target(namespace=_NAMESPACE, version='1.1') + + def __init__(self, service_name, backdoor_port): + self.service_name = service_name + self.backdoor_port = backdoor_port + + def ping(self, context, arg): + resp = {'service': self.service_name, 'arg': arg} + return jsonutils.to_primitive(resp) + + def get_backdoor_port(self, context): + return self.backdoor_port diff --git a/rack/cmd/__init__.py b/rack/cmd/__init__.py new file mode 100644 index 0000000..51d6433 --- /dev/null +++ b/rack/cmd/__init__.py @@ -0,0 +1,33 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys + +if ('eventlet' in sys.modules and + os.environ.get('EVENTLET_NO_GREENDNS', '').lower() != 'yes'): + raise ImportError('eventlet imported before rack/cmd/__init__ ' + '(env var set to %s)' + % os.environ.get('EVENTLET_NO_GREENDNS')) + +os.environ['EVENTLET_NO_GREENDNS'] = 'yes' + +import eventlet +from rack import debugger + +if debugger.enabled(): + # turn off thread patching to enable the remote debugger + eventlet.monkey_patch(os=False, thread=False) +else: + eventlet.monkey_patch(os=False, thread=False) diff --git a/rack/cmd/api.py b/rack/cmd/api.py new file mode 100644 index 0000000..a4c7002 --- /dev/null +++ b/rack/cmd/api.py @@ -0,0 +1,36 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Starter script for RACK API.""" + +import sys + +from oslo.config import cfg + +from rack import config +from rack.openstack.common import log as logging +from rack import service +from rack import utils + +CONF = cfg.CONF + +def main(): + config.parse_args(sys.argv) + logging.setup("rack") + utils.monkey_patch() + + launcher = service.process_launcher() + server = service.WSGIService('rackapi') + launcher.launch_service(server, workers=server.workers or 1) + launcher.wait() diff --git a/rack/cmd/resourceoperator.py b/rack/cmd/resourceoperator.py new file mode 100644 index 0000000..5b6e1ca --- /dev/null +++ b/rack/cmd/resourceoperator.py @@ -0,0 +1,42 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Starter script for RACK ResourceOperator.""" + +import sys + +from oslo.config import cfg + +from rack import config +from rack.openstack.common import log as logging +from rack import service +from rack import utils + +CONF = cfg.CONF +CONF.import_opt('resourceoperator_topic', 'rack.resourceoperator.rpcapi') +CONF.import_opt('os_username', 'rack.resourceoperator.openstack') +CONF.import_opt('os_password', 'rack.resourceoperator.openstack') +CONF.import_opt('os_tenant_name', 'rack.resourceoperator.openstack') +CONF.import_opt('os_auth_url', 'rack.resourceoperator.openstack') + + +def main(): + config.parse_args(sys.argv) + logging.setup("rack") + utils.monkey_patch() + + server = service.Service.create(binary='rack-resourceoperator', + topic=CONF.resourceoperator_topic) + service.serve(server) + service.wait() diff --git a/rack/cmd/scheduler.py b/rack/cmd/scheduler.py new file mode 100644 index 0000000..7dfc5c8 --- /dev/null +++ b/rack/cmd/scheduler.py @@ -0,0 +1,37 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Starter script for Rack Scheduler.""" + +import sys + +from oslo.config import cfg + +from rack import config +from rack.openstack.common import log as logging +from rack import service +from rack import utils + +CONF = cfg.CONF +CONF.import_opt('scheduler_topic', 'rack.scheduler.rpcapi') + + +def main(): + config.parse_args(sys.argv) + logging.setup("rack") + utils.monkey_patch() + + server = service.Service.create(binary='rack-scheduler', + topic=CONF.scheduler_topic) + service.serve(server) + service.wait() diff --git a/rack/config.py b/rack/config.py new file mode 100644 index 0000000..e674565 --- /dev/null +++ b/rack/config.py @@ -0,0 +1,35 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from oslo.config import cfg + +from rack import debugger +from rack.openstack.common.db import options +from rack import paths +from rack import rpc +from rack import version + +_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('rack.sqlite') + + +def parse_args(argv, default_config_files=None): + options.set_defaults(sql_connection=_DEFAULT_SQL_CONNECTION, + sqlite_db='rack.sqlite') + rpc.set_defaults(control_exchange='rack') + debugger.register_cli_opts() + cfg.CONF(argv[1:], + project='rack', + version=version.version_string(), + default_config_files=default_config_files) + rpc.init(cfg.CONF) diff --git a/rack/context.py b/rack/context.py new file mode 100644 index 0000000..bced43f --- /dev/null +++ b/rack/context.py @@ -0,0 +1,227 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""RequestContext: context for requests that persist through all of rack.""" + +import copy +import uuid + +import six + +from rack import exception +from rack.openstack.common.gettextutils import _ +from rack.openstack.common import local +from rack.openstack.common import log as logging +from rack.openstack.common import timeutils +from rack import policy + + +LOG = logging.getLogger(__name__) + + +def generate_request_id(): + return 'req-' + str(uuid.uuid4()) + + +class RequestContext(object): + """Security context and request information. + + Represents the user taking a given action within the system. 
+ + """ + + def __init__(self, user_id, project_id, is_admin=None, read_deleted="no", + roles=None, remote_address=None, timestamp=None, + request_id=None, auth_token=None, overwrite=True, + quota_class=None, user_name=None, project_name=None, + service_catalog=None, instance_lock_checked=False, **kwargs): + """:param read_deleted: 'no' indicates deleted records are hidden, + 'yes' indicates deleted records are visible, + 'only' indicates that *only* deleted records are visible. + + + :param overwrite: Set to False to ensure that the greenthread local + copy of the index is not overwritten. + + :param kwargs: Extra arguments that might be present, but we ignore + because they possibly came in from older rpc messages. + """ + if kwargs: + LOG.warn(_('Arguments dropped when creating context: %s') % + str(kwargs)) + + self.user_id = user_id + self.project_id = project_id + self.roles = roles or [] + self.read_deleted = read_deleted + self.remote_address = remote_address + if not timestamp: + timestamp = timeutils.utcnow() + if isinstance(timestamp, six.string_types): + timestamp = timeutils.parse_strtime(timestamp) + self.timestamp = timestamp + if not request_id: + request_id = generate_request_id() + self.request_id = request_id + self.auth_token = auth_token + + if service_catalog: + # Only include required parts of service_catalog + self.service_catalog = [s for s in service_catalog + if s.get('type') in ('identity', 'image', 'network', 'compute')] + else: + # if list is empty or none + self.service_catalog = [] + + self.instance_lock_checked = instance_lock_checked + + # NOTE(markmc): this attribute is currently only used by the + # rs_limits turnstile pre-processor. + # See https://lists.launchpad.net/openstack/msg12200.html + self.quota_class = quota_class + self.user_name = user_name + self.project_name = project_name + self.is_admin = is_admin + if self.is_admin is None: + self.is_admin = policy.check_is_admin(self) + if overwrite or not hasattr(local.store, 'context'): + self.update_store() + + def _get_read_deleted(self): + return self._read_deleted + + def _set_read_deleted(self, read_deleted): + if read_deleted not in ('no', 'yes', 'only'): + raise ValueError(_("read_deleted can only be one of 'no', " + "'yes' or 'only', not %r") % read_deleted) + self._read_deleted = read_deleted + + def _del_read_deleted(self): + del self._read_deleted + + read_deleted = property(_get_read_deleted, _set_read_deleted, + _del_read_deleted) + + def update_store(self): + local.store.context = self + + def to_dict(self): + return {'user_id': self.user_id, + 'project_id': self.project_id, + 'is_admin': self.is_admin, + 'read_deleted': self.read_deleted, + 'roles': self.roles, + 'remote_address': self.remote_address, + 'timestamp': timeutils.strtime(self.timestamp), + 'request_id': self.request_id, + 'auth_token': self.auth_token, + 'quota_class': self.quota_class, + 'user_name': self.user_name, + 'service_catalog': self.service_catalog, + 'project_name': self.project_name, + 'instance_lock_checked': self.instance_lock_checked, + 'tenant': self.tenant, + 'user': self.user} + + @classmethod + def from_dict(cls, values): + values.pop('user', None) + values.pop('tenant', None) + return cls(**values) + + def elevated(self, read_deleted=None, overwrite=False): + """Return a version of this context with admin flag set.""" + context = copy.copy(self) + context.is_admin = True + + if 'admin' not in context.roles: + context.roles.append('admin') + + if read_deleted is not None: + context.read_deleted = 
read_deleted + + return context + + # NOTE(sirp): the openstack/common version of RequestContext uses + # tenant/user whereas the Rack version uses project_id/user_id. We need + # this shim in order to use context-aware code from openstack/common, like + # logging, until we make the switch to using openstack/common's version of + # RequestContext. + @property + def tenant(self): + return self.project_id + + @property + def user(self): + return self.user_id + + +def get_admin_context(read_deleted="no"): + return RequestContext(user_id=None, + project_id=None, + is_admin=True, + read_deleted=read_deleted, + overwrite=False) + + +def is_user_context(context): + """Indicates if the request context is a normal user.""" + if not context: + return False + if context.is_admin: + return False + if not context.user_id or not context.project_id: + return False + return True + + +def require_admin_context(ctxt): + """Raise exception.AdminRequired() if context is an admin context.""" + if not ctxt.is_admin: + raise exception.AdminRequired() + + +def require_context(ctxt): + """Raise exception.NotAuthorized() if context is not a user or an + admin context. + """ + if not ctxt.is_admin and not is_user_context(ctxt): + raise exception.NotAuthorized() + + +def authorize_project_context(context, project_id): + """Ensures a request has permission to access the given project.""" + if is_user_context(context): + if not context.project_id: + raise exception.NotAuthorized() + elif context.project_id != project_id: + raise exception.NotAuthorized() + + +def authorize_user_context(context, user_id): + """Ensures a request has permission to access the given user.""" + if is_user_context(context): + if not context.user_id: + raise exception.NotAuthorized() + elif context.user_id != user_id: + raise exception.NotAuthorized() + + +def authorize_quota_class_context(context, class_name): + """Ensures a request has permission to access the given quota class.""" + if is_user_context(context): + if not context.quota_class: + raise exception.NotAuthorized() + elif context.quota_class != class_name: + raise exception.NotAuthorized() diff --git a/rack/db/__init__.py b/rack/db/__init__.py new file mode 100644 index 0000000..8efc164 --- /dev/null +++ b/rack/db/__init__.py @@ -0,0 +1,18 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +DB abstraction for Nova +""" + +from rack.db.api import * # noqa diff --git a/rack/db/api.py b/rack/db/api.py new file mode 100644 index 0000000..675c65b --- /dev/null +++ b/rack/db/api.py @@ -0,0 +1,179 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from oslo.config import cfg +from rack.openstack.common.db import api as db_api + + +CONF = cfg.CONF +db_opts = [ + cfg.BoolOpt('enable_new_services', + default=True, + help='Services to be added to the available pool on create') +] +CONF.register_opts(db_opts) +CONF.import_opt('backend', 'rack.openstack.common.db.options', + group='database') + +_BACKEND_MAPPING = {'sqlalchemy': 'rack.db.sqlalchemy.api'} + + +IMPL = db_api.DBAPI(CONF.database.backend, backend_mapping=_BACKEND_MAPPING) + + +def group_get_all(context, filters=None): + return IMPL.group_get_all(context, filters) + + +def group_get_by_gid(context, gid): + return IMPL.group_get_by_gid(context, gid) + + +def group_create(context, values): + return IMPL.group_create(context, values) + + +def group_update(context, values): + return IMPL.group_update(context, values) + +def group_delete(context, gid): + return IMPL.group_delete(context, gid) + + +def service_destroy(context, service_id): + """Destroy the service or raise if it does not exist.""" + return IMPL.service_destroy(context, service_id) + + +def service_get(context, service_id): + """Get a service or raise if it does not exist.""" + return IMPL.service_get(context, service_id) + + +def service_get_by_host_and_topic(context, host, topic): + """Get a service by host it's on and topic it listens to.""" + return IMPL.service_get_by_host_and_topic(context, host, topic) + + +def service_get_all(context, disabled=None): + """Get all services.""" + return IMPL.service_get_all(context, disabled) + + +def service_get_all_by_topic(context, topic): + """Get all services for a given topic.""" + return IMPL.service_get_all_by_topic(context, topic) + + +def service_get_all_by_host(context, host): + """Get all services for a given host.""" + return IMPL.service_get_all_by_host(context, host) + + +def service_get_by_args(context, host, binary): + """Get the state of a service by node name and binary.""" + return IMPL.service_get_by_args(context, host, binary) + + +def service_create(context, values): + """Create a service from the values dictionary.""" + return IMPL.service_create(context, values) + + +def service_update(context, service_id, values): + """Set the given properties on a service and update it. + + Raises NotFound if service does not exist. 
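+
+    For example (the values below are illustrative)::
+
+        service_update(context, service_id, {'disabled': True})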
+ + """ + return IMPL.service_update(context, service_id, values) + + +def network_create(context, values): + return IMPL.network_create(context, values) + + +def network_update(context, network_id, values): + IMPL.network_update(context, network_id, values) + + +def network_get_all(context, gid, filters={}): + return IMPL.network_get_all(context, gid, filters) + + +def network_get_by_network_id(context, gid, network_id): + return IMPL.network_get_by_network_id(context, gid, network_id) + + +def network_delete(context, gid, network_id): + return IMPL.network_delete(context, gid, network_id) + + +def keypair_get_all(context, gid, filters={}): + return IMPL.keypair_get_all(context, gid, filters) + + +def keypair_get_by_keypair_id(context, gid, keypair_id): + return IMPL.keypair_get_by_keypair_id(context, gid, keypair_id) + + +def keypair_create(context, values): + return IMPL.keypair_create(context, values) + + +def keypair_update(context, gid, keypair_id, values): + return IMPL.keypair_update(context, gid, keypair_id, values) + + +def keypair_delete(context, gid, keypair_id): + return IMPL.keypair_delete(context, gid, keypair_id) + + +def securitygroup_get_all(context, gid, filters={}): + return IMPL.securitygroup_get_all(context, gid, filters) + + +def securitygroup_get_by_securitygroup_id(context, gid, securitygroup_id): + return IMPL.securitygroup_get_by_securitygroup_id(context, gid, securitygroup_id) + + +def securitygroup_create(context, values): + return IMPL.securitygroup_create(context, values) + + +def securitygroup_update(context, gid, securitygroup_id, values): + return IMPL.securitygroup_update(context, gid, securitygroup_id, values) + + +def securitygroup_delete(context, gid, securitygroup_id): + return IMPL.securitygroup_delete(context, gid, securitygroup_id) + + +def process_get_all(context, gid, filters={}): + return IMPL.process_get_all(context, gid, filters) + + +def process_get_by_pid(context, gid, pid): + return IMPL.process_get_by_pid(context, gid, pid) + + +def process_create(context, values, network_ids, securitygroup_ids): + return IMPL.process_create(context, values, network_ids, securitygroup_ids) + + +def process_update(context, gid, pid, values): + return IMPL.process_update(context, gid, pid, values) + + +def process_delete(context, gid, pid): + return IMPL.process_delete(context, gid, pid) diff --git a/rack/db/base.py b/rack/db/base.py new file mode 100644 index 0000000..55bdd00 --- /dev/null +++ b/rack/db/base.py @@ -0,0 +1,36 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Base class for classes that need modular database access.""" + +from oslo.config import cfg + +from rack.openstack.common import importutils + +db_driver_opt = cfg.StrOpt('db_driver', + default='rack.db', + help='The driver to use for database access') + +CONF = cfg.CONF +CONF.register_opt(db_driver_opt) + + +class Base(object): + """DB driver is injected in the init method.""" + + def __init__(self, db_driver=None): + super(Base, self).__init__() + if not db_driver: + db_driver = CONF.db_driver + self.db = importutils.import_module(db_driver) # pylint: disable=C0103 diff --git a/rack/db/migration.py b/rack/db/migration.py new file mode 100644 index 0000000..1092e15 --- /dev/null +++ b/rack/db/migration.py @@ -0,0 +1,37 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Database setup and migration commands.""" + +from rack import utils + + +IMPL = utils.LazyPluggable('backend', + config_group='database', + sqlalchemy='rack.db.sqlalchemy.migration') + + +def db_sync(version=None): + """Migrate the database to `version` or the most recent version.""" + return IMPL.db_sync(version=version) + + +def db_version(): + """Display the current database version.""" + return IMPL.db_version() + + +def db_initial_version(): + """The starting version for the database.""" + return IMPL.db_initial_version() diff --git a/rack/db/sqlalchemy/__init__.py b/rack/db/sqlalchemy/__init__.py new file mode 100644 index 0000000..a1c0b9a --- /dev/null +++ b/rack/db/sqlalchemy/__init__.py @@ -0,0 +1,21 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from sqlalchemy import BigInteger +from sqlalchemy.ext.compiler import compiles + + +@compiles(BigInteger, 'sqlite') +def compile_big_int_sqlite(type_, compiler, **kw): + return 'INTEGER' diff --git a/rack/db/sqlalchemy/api.py b/rack/db/sqlalchemy/api.py new file mode 100644 index 0000000..b2f971c --- /dev/null +++ b/rack/db/sqlalchemy/api.py @@ -0,0 +1,647 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import uuid + +import functools +from rack import exception +import rack.context +from rack.db.sqlalchemy import models +from rack.openstack.common.db import exception as db_exc +from rack.openstack.common.db.sqlalchemy import session as db_session +import sys + +from oslo.config import cfg +from rack.openstack.common import jsonutils +from rack.openstack.common import log as logging +from rack.openstack.common import timeutils +from rack.openstack.common.gettextutils import _ + +LOG = logging.getLogger(__name__) + +CONF = cfg.CONF +CONF.import_opt('connection', + 'rack.openstack.common.db.options', + group='database') + +_FACADE = None + + +def _create_facade_lazily(): + global _FACADE + if _FACADE is None: + _FACADE = db_session.EngineFacade( + CONF.database.connection, + **dict(CONF.database.iteritems())) + return _FACADE + + +def get_engine(): + facade = _create_facade_lazily() + return facade.get_engine() + + +def get_session(**kwargs): + facade = _create_facade_lazily() + return facade.get_session(**kwargs) + + +def get_backend(): + return sys.modules[__name__] + + +def group_get_all(context, filters=None): + session = get_session() + filters = filters or {} + query = session.query(models.Group).filter_by(user_id=context.user_id)\ + .filter_by(deleted=0) + if 'project_id' in filters: + query = query.filter_by(project_id=filters['project_id']) + if 'name' in filters: + query = query.filter_by(display_name=filters['name']) + if 'status' in filters: + query = query.filter_by(status=filters['status']) + responce_groups = query.all() + + return [dict(group) for group in responce_groups] + + +def group_get_by_gid(context, gid): + session = get_session() + group = session.query(models.Group)\ + .filter_by(user_id=context.user_id)\ + .filter_by(gid=gid)\ + .filter_by(deleted=0)\ + .first() + + if not group: + raise exception.GroupNotFound(gid=gid) + return dict(group) + + +def require_admin_context(f): + """Decorator to require admin request context. + + The first argument to the wrapped function must be the context. + + """ + + @functools.wraps(f) + def wrapper(*args, **kwargs): + rack.context.require_admin_context(args[0]) + return f(*args, **kwargs) + return wrapper + + +def group_create(context, values): + session = get_session() + group_ref = models.Group() + group_ref.update(values) + group_ref.save(session) + + return dict(group_ref) + + +def group_update(context, values): + session = get_session() + group_ref = session.query(models.Group). 
\ + filter(models.Group.gid == values["gid"]).first() + if group_ref is None: + raise exception.GroupNotFound(gid=values["gid"]) + + group_ref.update(values) + group_ref.save(session) + + return dict(group_ref) + +def group_delete(context, gid): + session = get_session() + group_ref = session.query(models.Group)\ + .filter_by(deleted=0)\ + .filter_by(gid=gid)\ + .first() + if group_ref is None: + raise exception.GroupNotFound(gid=gid) + + values = { + "status": "DELETING", + "deleted": 1, + "deleted_at": timeutils.utcnow() + } + group_ref.update(values) + group_ref.save(session) + + return dict(group_ref) + +def service_model_query(context, model, *args, **kwargs): + session = kwargs.get('session') or get_session() + read_deleted = kwargs.get('read_deleted') or context.read_deleted + query = session.query(model, *args) + + default_deleted_value = model.__mapper__.c.deleted.default.arg + if read_deleted == 'no': + query = query.filter(model.deleted == default_deleted_value) + elif read_deleted == 'yes': + pass # omit the filter to include deleted and active + elif read_deleted == 'only': + query = query.filter(model.deleted != default_deleted_value) + else: + raise Exception(_("Unrecognized read_deleted value '%s'") + % read_deleted) + + return query + + +@require_admin_context +def service_destroy(context, service_id): + session = get_session() + with session.begin(): + count = service_model_query(context, models.Service, + session=session).\ + filter_by(id=service_id).\ + soft_delete(synchronize_session=False) + + if count == 0: + raise exception.ServiceNotFound(service_id=service_id) + + +@require_admin_context +def service_get(context, service_id): + session = get_session() + service_ref = service_model_query(context, models.Service, + session=session).\ + filter_by(id=service_id).\ + first() + + if not service_ref: + raise exception.ServiceNotFound(service_id=service_id) + + return jsonutils.to_primitive(service_ref) + + +@require_admin_context +def service_get_all(context, disabled=None): + session = get_session() + query = service_model_query(context, models.Service, + session=session) + + if disabled is not None: + query = query.filter_by(disabled=disabled) + + service_refs = query.all() + return jsonutils.to_primitive(service_refs) + + +@require_admin_context +def service_get_all_by_topic(context, topic): + session = get_session() + service_refs = service_model_query(context, models.Service, + session=session, + read_deleted="no").\ + filter_by(disabled=False).\ + filter_by(topic=topic).\ + all() + + return jsonutils.to_primitive(service_refs) + + +@require_admin_context +def service_get_by_host_and_topic(context, host, topic): + session = get_session() + service_ref = service_model_query(context, models.Service, + session=session, + read_deleted="no").\ + filter_by(disabled=False).\ + filter_by(host=host).\ + filter_by(topic=topic).\ + first() + + return jsonutils.to_primitive(service_ref) + + +@require_admin_context +def service_get_all_by_host(context, host): + session = get_session() + service_refs = service_model_query(context, models.Service, + session=session, + read_deleted="no").\ + filter_by(host=host).\ + all() + + return jsonutils.to_primitive(service_refs) + + +@require_admin_context +def service_get_by_args(context, host, binary): + session = get_session() + service_ref = service_model_query(context, models.Service, + session=session).\ + filter_by(host=host).\ + filter_by(binary=binary).\ + first() + + if not service_ref: + raise 
exception.HostBinaryNotFound(host=host, binary=binary) + + return jsonutils.to_primitive(service_ref) + + +@require_admin_context +def service_create(context, values): + session = get_session() + service_ref = models.Service() + service_ref.update(values) + if not CONF.enable_new_services: + service_ref.disabled = True + try: + service_ref.save(session) + except db_exc.DBDuplicateEntry as e: + if 'binary' in e.columns: + raise exception.ServiceBinaryExists(host=values.get('host'), + binary=values.get('binary')) + raise exception.ServiceTopicExists(host=values.get('host'), + topic=values.get('topic')) + + return jsonutils.to_primitive(service_ref) + + +@require_admin_context +def service_update(context, service_id, values): + session = get_session() + with session.begin(): + service_ref = service_model_query(context, models.Service, + session=session).\ + filter_by(id=service_id).\ + first() + + if not service_ref: + raise exception.ServiceNotFound(service_id=service_id) + + service_ref.update(values) + + return jsonutils.to_primitive(service_ref) + + +def network_create(context, values): + session = get_session() + network_ref = models.Network() + network_ref.update(values) + network_ref.save(session) + + return dict(network_ref) + + +def network_update(context, network_id, values): + session = get_session() + network_ref = session.query(models.Network)\ + .filter(models.Network.deleted == 0)\ + .filter(models.Network.network_id == network_id)\ + .first() + + network_ref.update(values) + network_ref.save(session) + + +def network_get_all(context, gid, filters): + session = get_session() + query = session.query(models.Network)\ + .filter_by(deleted=0)\ + .filter_by(gid=gid) + + if 'network_id' in filters: + query = query.filter_by(network_id=filters['network_id']) + if 'neutron_network_id' in filters: + query = query.filter_by(neutron_network_id=filters['neutron_network_id']) + if 'display_name' in filters: + query = query.filter_by(display_name=filters['display_name']) + if 'status' in filters: + query = query.filter_by(status=filters['status']) + if 'is_admin' in filters: + query = query.filter_by(is_admin=filters['is_admin']) + if 'subnet' in filters: + query = query.filter_by(subnet=filters['subnet']) + if 'ext_router' in filters: + query = query.filter_by(ext_router=filters['ext_router']) + + networks = query.all() + + return [dict(network) for network in networks] + + +def network_get_by_network_id(context, gid, network_id): + session = get_session() + network = session.query(models.Network)\ + .filter_by(deleted=0)\ + .filter_by(gid=gid)\ + .filter_by(network_id=network_id)\ + .first() + if not network: + raise exception.NetworkNotFound(network_id=network_id) + + network_dict = dict(network) + network_dict.update(dict(processes=[dict(process) for process in network.processes])) + + return network_dict + + +def network_delete(context, gid, network_id): + session = get_session() + network_ref = session.query(models.Network)\ + .filter(models.Network.deleted == 0)\ + .filter(models.Network.gid == gid)\ + .filter(models.Network.network_id == network_id)\ + .first() + values = {} + values["deleted"] = 1 + values["deleted_at"] = timeutils.utcnow() + values["status"] = "DELETING" + network_ref.update(values) + network_ref.save(session) + return dict(network_ref) + + +def keypair_get_all(context, gid, filters={}): + session = get_session() + query = session.query(models.Keypair)\ + .filter_by(gid=gid)\ + .filter_by(deleted=0) + if 'keypair_id' in filters: + query = 
query.filter_by(keypair_id=filters['keypair_id']) + if 'nova_keypair_id' in filters: + query = query.filter_by(nova_keypair_id=filters['nova_keypair_id']) + if 'display_name' in filters: + query = query.filter_by(display_name=filters['display_name']) + if 'status' in filters: + query = query.filter_by(status=filters['status']) + if 'is_default' in filters: + query = query.filter_by(is_default=filters['is_default']) + + responce_keypairs = query.all() + + return [dict(keypair) for keypair in responce_keypairs] + + +def keypair_get_by_keypair_id(context, gid, keypair_id): + session = get_session() + keypair = session.query(models.Keypair)\ + .filter_by(gid=gid)\ + .filter_by(keypair_id=keypair_id)\ + .filter_by(deleted=0)\ + .first() + + if not keypair: + raise exception.KeypairNotFound(keypair_id=keypair_id) + + return dict(keypair) + + +def keypair_create(context, values): + session = get_session() + keypair_ref = models.Keypair() + keypair_ref.update(values) + keypair_ref.save(session) + return dict(keypair_ref) + + +def keypair_update(context, gid, keypair_id, values): + session = get_session() + keypair_ref = session.query(models.Keypair)\ + .filter_by(gid=gid)\ + .filter_by(keypair_id=keypair_id)\ + .filter_by(deleted=0)\ + .first() + if keypair_ref is None: + raise exception.KeypairNotFound(keypair_id=keypair_id) + + keypair_ref.update(values) + keypair_ref.save(session) + + return dict(keypair_ref) + + +def keypair_delete(context, gid, keypair_id): + session = get_session() + keypair_ref = session.query(models.Keypair)\ + .filter_by(gid=gid)\ + .filter_by(keypair_id=keypair_id)\ + .filter_by(deleted=0)\ + .first() + if keypair_ref is None: + raise exception.KeypairNotFound(keypair_id=keypair_id) + + values = { + "status": "DELETING", + "deleted": 1, + "deleted_at": timeutils.utcnow() + } + keypair_ref.update(values) + keypair_ref.save(session) + + return dict(keypair_ref) + + +def securitygroup_get_all(context, gid, filters={}): + session = get_session() + query = session.query(models.Securitygroup).filter_by(gid=gid, deleted=0) + + if 'securitygroup_id' in filters: + query = query.filter_by(securitygroup_id=filters['securitygroup_id']) + if 'name' in filters: + query = query.filter_by(display_name=filters['name']) + if 'status' in filters: + query = query.filter_by(status=filters['status']) + if 'is_default' in filters: + query = query.filter_by(is_default=filters['is_default']) + securitygroups = query.all() + + return [dict(securitygroup) for securitygroup in securitygroups] + + +def securitygroup_get_by_securitygroup_id(context, gid, securitygroup_id): + session = get_session() + securitygroup = session.query(models.Securitygroup)\ + .filter_by(deleted=0)\ + .filter_by(gid=gid)\ + .filter_by(securitygroup_id=securitygroup_id)\ + .first() + + if not securitygroup: + raise exception.SecuritygroupNotFound(securitygroup_id=securitygroup_id) + + securitygroup_dict = dict(securitygroup) + securitygroup_dict.update(dict(processes=[dict(process) for process in securitygroup.processes])) + return securitygroup_dict + + +def securitygroup_create(context, values): + session = get_session() + securitygroup_ref = models.Securitygroup() + securitygroup_ref.update(values) + securitygroup_ref.save(session) + + return dict(securitygroup_ref) + + +def securitygroup_update(context, gid, securitygroup_id, values): + session = get_session() + securitygroup_ref = session.query(models.Securitygroup). \ + filter_by(deleted=0). \ + filter_by(gid=gid). \ + filter_by(securitygroup_id=securitygroup_id). 
\ + first() + if securitygroup_ref is None: + raise exception.SecuritygroupNotFound(securitygroup_id=securitygroup_id) + + securitygroup_ref.update(values) + securitygroup_ref.save(session) + + return dict(securitygroup_ref) + +def securitygroup_delete(context, gid, securitygroup_id): + session = get_session() + securitygroup_ref = session.query(models.Securitygroup). \ + filter_by(deleted=0). \ + filter_by(gid=gid). \ + filter_by(securitygroup_id=securitygroup_id). \ + first() + if securitygroup_ref is None: + raise exception.SecuritygroupNotFound(securitygroup_id=securitygroup_id) + + securitygroup_ref.update({"deleted": 1, + "deleted_at": timeutils.utcnow(), + "status": "DELETING"}) + securitygroup_ref.save(session) + + return dict(securitygroup_ref) + + +def process_get_all(context, gid, filters={}): + session = get_session() + query = session.query(models.Process).filter_by(gid=gid, deleted=0) + + + if 'pid' in filters: + query = query.filter_by(pid=filters['pid']) + if 'ppid' in filters: + query = query.filter_by(ppid=filters['ppid']) + if 'name' in filters: + query = query.filter_by(display_name=filters['name']) + if 'status' in filters: + query = query.filter_by(status=filters['status']) + if 'glance_image_id' in filters: + query = query.filter_by(glance_image_id=filters['glance_image_id']) + if 'nova_flavor_id' in filters: + query = query.filter_by(nova_flavor_id=filters['nova_flavor_id']) + if 'keypair_id' in filters: + query = query.filter_by(keypair_id=filters['keypair_id']) + if 'securitygroup_id' in filters: + query = query.filter( + models.Process.securitygroups.any( + securitygroup_id=filters["securitygroup_id"])) + if 'network_id' in filters: + query = query.filter( + models.Process.networks.any( + network_id=filters["network_id"])) + + process_refs = query.all() + return [_get_process_dict(process_ref) for process_ref in process_refs] + +def process_get_by_pid(context, gid, pid): + session = get_session() + process_ref = session.query(models.Process)\ + .filter_by(deleted=0)\ + .filter_by(gid=gid)\ + .filter_by(pid=pid)\ + .first() + + if not process_ref: + raise exception.ProcessNotFound(pid=pid) + return _get_process_dict(process_ref) + + +def process_create(context, values, network_ids, securitygroup_ids): + session = get_session() + with session.begin(): + process_ref = models.Process(**values) + session.add(process_ref) + + try: + if network_ids: + for network_id in network_ids: + network_ref = session.query(models.Network)\ + .filter_by(deleted=0)\ + .filter_by(gid=values["gid"])\ + .filter_by(network_id=network_id)\ + .first() + if network_ref is None: + raise exception.NetworkNotFound(network_id=network_id) + session.add(models.ProcessNetwork(pid=values["pid"], network_id=network_ref.network_id)) + + if securitygroup_ids: + for securitygroup_id in securitygroup_ids: + securitygroup_ref = session.query(models.Securitygroup)\ + .filter_by(deleted=0)\ + .filter_by(gid=values["gid"])\ + .filter_by(securitygroup_id=securitygroup_id)\ + .first() + if securitygroup_ref is None: + raise exception.SecuritygroupNotFound(securitygroup_id=securitygroup_id) + session.add(models.ProcessSecuritygroup(pid=values["pid"], securitygroup_id=securitygroup_ref.securitygroup_id)) + + session.flush() + except db_exc.DBDuplicateEntry: + msg = _("securitygroup or network is duplicated") + raise exception.InvalidInput(reason=msg) + + return _get_process_dict(process_ref) + +def process_update(context, gid, pid, values): + session = get_session() + process_ref = session.query(models.Process). 
\ + filter_by(deleted=0). \ + filter_by(gid=gid). \ + filter_by(pid=pid). \ + first() + if process_ref is None: + raise exception.ProcessNotFound(pid=pid) + + process_ref.update(values) + process_ref.save(session) + + return dict(process_ref) + +def process_delete(context, gid, pid): + session = get_session() + process_ref = session.query(models.Process). \ + filter_by(deleted=0). \ + filter_by(gid=gid). \ + filter_by(pid=pid). \ + first() + if process_ref is None: + raise exception.ProcessNotFound(pid=pid) + + process_ref.update({"deleted":1, + 'deleted_at':timeutils.utcnow(), + "status":"DELETING"}) + process_ref.save(session) + + return _get_process_dict(process_ref) + + +def _get_process_dict(process_ref): + process_dict = dict(process_ref) + process_dict.update(dict(securitygroups=[dict(securitygroup) + for securitygroup in process_ref.securitygroups])) + process_dict.update(dict(networks=[dict(network) + for network in process_ref.networks])) + return process_dict diff --git a/rack/db/sqlalchemy/migrate_repo/README b/rack/db/sqlalchemy/migrate_repo/README new file mode 100644 index 0000000..6218f8c --- /dev/null +++ b/rack/db/sqlalchemy/migrate_repo/README @@ -0,0 +1,4 @@ +This is a database migration repository. + +More information at +http://code.google.com/p/sqlalchemy-migrate/ diff --git a/rack/db/sqlalchemy/migrate_repo/__init__.py b/rack/db/sqlalchemy/migrate_repo/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/rack/db/sqlalchemy/migrate_repo/manage.py b/rack/db/sqlalchemy/migrate_repo/manage.py new file mode 100644 index 0000000..b379141 --- /dev/null +++ b/rack/db/sqlalchemy/migrate_repo/manage.py @@ -0,0 +1,19 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from migrate.versioning.shell import main + + +if __name__ == '__main__': + main(debug='False', repository='.') diff --git a/rack/db/sqlalchemy/migrate_repo/migrate.cfg b/rack/db/sqlalchemy/migrate_repo/migrate.cfg new file mode 100644 index 0000000..e17cc8a --- /dev/null +++ b/rack/db/sqlalchemy/migrate_repo/migrate.cfg @@ -0,0 +1,20 @@ +[db_settings] +# Used to identify which repository this database is versioned under. +# You can use the name of your project. +repository_id=rack + +# The name of the database table used to track the schema version. +# This name shouldn't already be used by your project. +# If this is changed once a database is under version control, you'll need to +# change the table name in each database too. +version_table=migrate_version + +# When committing a change script, Migrate will attempt to generate the +# sql for all supported databases; normally, if one of them fails - probably +# because you don't have that database installed - it is ignored and the +# commit continues, perhaps ending successfully. +# Databases in this list MUST compile successfully during a commit, or the +# entire commit will fail. 
List the databases your application will actually +# be using to ensure your updates to that database work properly. +# This must be a list; example: ['postgres','sqlite'] +required_dbs=[] diff --git a/rack/db/sqlalchemy/migrate_repo/versions/001_Add_groups_table.py b/rack/db/sqlalchemy/migrate_repo/versions/001_Add_groups_table.py new file mode 100644 index 0000000..0455dc7 --- /dev/null +++ b/rack/db/sqlalchemy/migrate_repo/versions/001_Add_groups_table.py @@ -0,0 +1,56 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from sqlalchemy import MetaData, Table, Column, Integer, String, DateTime + +from rack.openstack.common.gettextutils import _ +from rack.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + +meta = MetaData() + +groups = Table('groups', meta, + Column('created_at', DateTime), + Column('updated_at', DateTime), + Column('deleted_at', DateTime), + Column('deleted', Integer), + Column('gid', String(length=255), primary_key=True, nullable=False), + Column('user_id', String(length=255)), + Column('project_id', String(length=255)), + Column('display_name', String(length=255)), + Column('display_description', String(length=255)), + Column('status', String(length=255)), + mysql_engine='InnoDB', + mysql_charset='utf8' + ) + +def upgrade(migrate_engine): + meta.bind = migrate_engine + + try: + groups.create() + except Exception: + LOG.info(repr(groups)) + LOG.exception(_('Exception while creating groups table.')) + raise + +def downgrade(migrate_engine): + meta.bind = migrate_engine + + try: + groups.drop() + except Exception: + LOG.info(repr(groups)) + LOG.exception(_('Exception while dropping groups table.')) + raise \ No newline at end of file diff --git a/rack/db/sqlalchemy/migrate_repo/versions/002_Add_services_table.py b/rack/db/sqlalchemy/migrate_repo/versions/002_Add_services_table.py new file mode 100644 index 0000000..a5c7c3d --- /dev/null +++ b/rack/db/sqlalchemy/migrate_repo/versions/002_Add_services_table.py @@ -0,0 +1,67 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from migrate.changeset import UniqueConstraint +from sqlalchemy import Column, MetaData, Table +from sqlalchemy import Boolean, DateTime, Integer, String + +from rack.openstack.common.gettextutils import _ +from rack.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + +meta = MetaData() + +services = Table('services', meta, + Column('created_at', DateTime), + Column('updated_at', DateTime), + Column('deleted_at', DateTime), + Column('id', Integer, primary_key=True, nullable=False), + Column('host', String(length=255)), + Column('binary', String(length=255)), + Column('topic', String(length=255)), + Column('report_count', Integer, nullable=False), + Column('disabled', Boolean), + Column('deleted', Integer), + Column('disabled_reason', String(length=255)), + mysql_engine='InnoDB', + mysql_charset='utf8' + ) + +def upgrade(migrate_engine): + meta.bind = migrate_engine + + try: + services.create() + except Exception: + LOG.info(repr(services)) + LOG.exception(_('Exception while creating services table.')) + raise + + UniqueConstraint('host', 'topic', 'deleted', + table=services, + name='uniq_services0host0topic0deleted').create() + UniqueConstraint('host', 'binary', 'deleted', + table=services, + name='uniq_services0host0binary0deleted').create() + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + + try: + services.drop() + except Exception: + LOG.info(repr(services)) + LOG.exception(_('Exception while dropping services table.')) + raise diff --git a/rack/db/sqlalchemy/migrate_repo/versions/003_Add_keypairs_table.py b/rack/db/sqlalchemy/migrate_repo/versions/003_Add_keypairs_table.py new file mode 100644 index 0000000..39b124c --- /dev/null +++ b/rack/db/sqlalchemy/migrate_repo/versions/003_Add_keypairs_table.py @@ -0,0 +1,66 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from migrate import ForeignKeyConstraint +from migrate.changeset import UniqueConstraint +from sqlalchemy import Column, MetaData, Table +from sqlalchemy import Boolean, DateTime, Integer, String, Text + +from rack.openstack.common.gettextutils import _ +from rack.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + +meta = MetaData() + +keypairs = Table('keypairs', meta, + Column('created_at', DateTime), + Column('updated_at', DateTime), + Column('deleted_at', DateTime), + Column('deleted', Integer), + Column('keypair_id', String(length=36), primary_key=True, nullable=False), + Column('gid', String(length=36), nullable=False), + Column('nova_keypair_id', String(length=255)), + Column('private_key', Text), + Column('display_name', String(length=255)), + Column('is_default', Boolean), + Column('user_id', String(length=255)), + Column('project_id', String(length=255)), + Column('status', String(length=255)), + mysql_engine='InnoDB', + mysql_charset='utf8' + ) + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + + try: + keypairs.create() + groups = Table("groups", meta, autoload=True) + ForeignKeyConstraint([keypairs.c.gid], [groups.c.gid]).create() + except Exception: + LOG.info(repr(keypairs)) + LOG.exception(_('Exception while creating keypairs table.')) + raise + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + + try: + keypairs.drop() + except Exception: + LOG.info(repr(keypairs)) + LOG.exception(_('Exception while dropping keypairs table.')) + raise diff --git a/rack/db/sqlalchemy/migrate_repo/versions/004_Add_securitygroups_table.py b/rack/db/sqlalchemy/migrate_repo/versions/004_Add_securitygroups_table.py new file mode 100644 index 0000000..757baa0 --- /dev/null +++ b/rack/db/sqlalchemy/migrate_repo/versions/004_Add_securitygroups_table.py @@ -0,0 +1,67 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from migrate.changeset import UniqueConstraint +from migrate import ForeignKeyConstraint +from sqlalchemy import Column, MetaData, Table +from sqlalchemy import Boolean, DateTime, Integer, String + +from rack.openstack.common.gettextutils import _ +from rack.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + +meta = MetaData() + + +securitygroups = Table('securitygroups', meta, + Column('created_at', DateTime), + Column('updated_at', DateTime), + Column('deleted_at', DateTime), + Column('deleted', Integer, nullable=False), + Column('securitygroup_id', String(length=36), primary_key=True, nullable=False), + Column('gid', String(length=36), nullable=False), + Column('neutron_securitygroup_id', String(length=36)), + Column('is_default', Boolean, nullable=False), + Column('user_id', String(length=255), nullable=False), + Column('project_id', String(length=255), nullable=False), + Column('display_name', String(length=255), nullable=False), + Column('status', String(length=255), nullable=False), + mysql_engine='InnoDB', + mysql_charset='utf8' + ) + +def upgrade(migrate_engine): + meta.bind = migrate_engine + groups = Table("groups", meta, autoload=True) + + try: + securitygroups.create() + except Exception: + LOG.info(repr(securitygroups)) + LOG.exception(_('Exception while creating securitygroups table.')) + raise + + ForeignKeyConstraint(columns=[securitygroups.c.gid], + refcolumns=[groups.c.gid]).create() + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + + try: + securitygroups.drop() + except Exception: + LOG.info(repr(securitygroups)) + LOG.exception(_('Exception while dropping securitygroups table.')) + raise diff --git a/rack/db/sqlalchemy/migrate_repo/versions/005_Add_networks_table.py b/rack/db/sqlalchemy/migrate_repo/versions/005_Add_networks_table.py new file mode 100644 index 0000000..d4890f2 --- /dev/null +++ b/rack/db/sqlalchemy/migrate_repo/versions/005_Add_networks_table.py @@ -0,0 +1,66 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from rack.openstack.common.gettextutils import _ +from rack.openstack.common import log as logging +from migrate import ForeignKeyConstraint +from sqlalchemy import MetaData, Table, Column, Integer +from sqlalchemy import String, DateTime, Boolean + +LOG = logging.getLogger(__name__) + +meta = MetaData() + +networks = Table('networks', meta, + Column('created_at', DateTime), + Column('updated_at', DateTime), + Column('deleted_at', DateTime), + Column('network_id', String(length=255), + primary_key=True, nullable=False), + Column('gid', String(length=255), nullable=False), + Column('neutron_network_id', String(length=255)), + Column('is_admin', Boolean), + Column('subnet', String(length=255)), + Column('ext_router', String(length=255)), + Column('user_id', String(length=255)), + Column('project_id', String(length=255)), + Column('display_name', String(length=255)), + Column('deleted', Integer), + Column('status', String(length=255)), + mysql_engine='InnoDB', + mysql_charset='utf8' + ) + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + + try: + networks.create() + groups = Table("groups", meta, autoload=True) + ForeignKeyConstraint([networks.c.gid], [groups.c.gid]).create() + except Exception: + LOG.info(repr(networks)) + LOG.exception(_('Exception while creating networks table.')) + raise + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + + try: + networks.drop() + except Exception: + LOG.info(repr(networks)) + LOG.exception(_('Exception while dropping networks table.')) + raise diff --git a/rack/db/sqlalchemy/migrate_repo/versions/006_Add_processes_table.py b/rack/db/sqlalchemy/migrate_repo/versions/006_Add_processes_table.py new file mode 100644 index 0000000..5f42657 --- /dev/null +++ b/rack/db/sqlalchemy/migrate_repo/versions/006_Add_processes_table.py @@ -0,0 +1,76 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+from migrate import ForeignKeyConstraint +from sqlalchemy import Column, MetaData, Table +from sqlalchemy import DateTime, Integer, String + +from rack.openstack.common.gettextutils import _ +from rack.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + +meta = MetaData() + + +processes = Table('processes', meta, + Column('created_at', DateTime), + Column('updated_at', DateTime), + Column('deleted_at', DateTime), + Column('deleted', Integer, nullable=False), + Column('gid', String(length=36), nullable=False), + Column('keypair_id', String(length=36)), + Column('pid', String(length=36), primary_key=True, nullable=False), + Column('ppid', String(length=36)), + Column('nova_instance_id', String(length=36)), + Column('glance_image_id', String(length=36), nullable=False), + Column('nova_flavor_id', Integer, nullable=False), + Column('user_id', String(length=255), nullable=False), + Column('project_id', String(length=255), nullable=False), + Column('display_name', String(length=255), nullable=False), + Column('status', String(length=255), nullable=False), + mysql_engine='InnoDB', + mysql_charset='utf8' + ) + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + groups = Table("groups", meta, autoload=True) + keypairs = Table("keypairs", meta, autoload=True) + + try: + processes.create() + except Exception: + LOG.info(repr(processes)) + LOG.exception(_('Exception while creating processes table.')) + raise + + ForeignKeyConstraint(columns=[processes.c.gid], + refcolumns=[groups.c.gid]).create() + + ForeignKeyConstraint(columns=[processes.c.keypair_id], + refcolumns=[keypairs.c.keypair_id]).create() + + ForeignKeyConstraint(columns=[processes.c.ppid], + refcolumns=[processes.c.pid]).create() + +def downgrade(migrate_engine): + meta.bind = migrate_engine + + try: + processes.drop() + except Exception: + LOG.info(repr(processes)) + LOG.exception(_('Exception while dropping processes table.')) + raise diff --git a/rack/db/sqlalchemy/migrate_repo/versions/007_Add_processes_securitygroups_table.py b/rack/db/sqlalchemy/migrate_repo/versions/007_Add_processes_securitygroups_table.py new file mode 100644 index 0000000..d693b7f --- /dev/null +++ b/rack/db/sqlalchemy/migrate_repo/versions/007_Add_processes_securitygroups_table.py @@ -0,0 +1,60 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from migrate import ForeignKeyConstraint, UniqueConstraint +from sqlalchemy import Column, MetaData, Table +from sqlalchemy import String + +from rack.openstack.common.gettextutils import _ +from rack.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + +meta = MetaData() + + +processes_securitygroups = Table('processes_securitygroups', meta, + Column('pid', String(length=36), nullable=False, primary_key=True), + Column('securitygroup_id', String(length=36), nullable=False, primary_key=True), + mysql_engine='InnoDB', + mysql_charset='utf8' + ) + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + processes = Table("processes", meta, autoload=True) + securitygroups = Table("securitygroups", meta, autoload=True) + + try: + processes_securitygroups.create() + except Exception: + LOG.info(repr(processes_securitygroups)) + LOG.exception(_('Exception while creating processes_securitygroups table.')) + raise + + ForeignKeyConstraint(columns=[processes_securitygroups.c.pid], + refcolumns=[processes.c.pid]).create() + ForeignKeyConstraint(columns=[processes_securitygroups.c.securitygroup_id], + refcolumns=[securitygroups.c.securitygroup_id]).create() + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + + try: + processes_securitygroups.drop() + except Exception: + LOG.info(repr(processes_securitygroups)) + LOG.exception(_('Exception while dropping processes_securitygroups table.')) + raise diff --git a/rack/db/sqlalchemy/migrate_repo/versions/008_Add_processes_networks_table.py b/rack/db/sqlalchemy/migrate_repo/versions/008_Add_processes_networks_table.py new file mode 100644 index 0000000..b5498e6 --- /dev/null +++ b/rack/db/sqlalchemy/migrate_repo/versions/008_Add_processes_networks_table.py @@ -0,0 +1,60 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from migrate import ForeignKeyConstraint +from sqlalchemy import Column, MetaData, Table +from sqlalchemy import String + +from rack.openstack.common.gettextutils import _ +from rack.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + +meta = MetaData() + + +processes_networks = Table('processes_networks', meta, + Column('pid', String(length=36), nullable=False, primary_key=True), + Column('network_id', String(length=36), nullable=False, primary_key=True), + mysql_engine='InnoDB', + mysql_charset='utf8' + ) + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + processes = Table("processes", meta, autoload=True) + networks = Table("networks", meta, autoload=True) + + try: + processes_networks.create() + except Exception: + LOG.info(repr(processes_networks)) + LOG.exception(_('Exception while creating processes_networks table.')) + raise + + ForeignKeyConstraint(columns=[processes_networks.c.pid], + refcolumns=[processes.c.pid]).create() + ForeignKeyConstraint(columns=[processes_networks.c.network_id], + refcolumns=[networks.c.network_id]).create() + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + + try: + processes_networks.drop() + except Exception: + LOG.info(repr(processes_networks)) + LOG.exception(_('Exception while dropping processes_networks table.')) + raise diff --git a/rack/db/sqlalchemy/migrate_repo/versions/__init__.py b/rack/db/sqlalchemy/migrate_repo/versions/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/rack/db/sqlalchemy/migration.py b/rack/db/sqlalchemy/migration.py new file mode 100644 index 0000000..dbd72c7 --- /dev/null +++ b/rack/db/sqlalchemy/migration.py @@ -0,0 +1,85 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os + +from migrate import exceptions as versioning_exceptions +from migrate.versioning import api as versioning_api +from migrate.versioning.repository import Repository +import sqlalchemy + +from rack.db.sqlalchemy import api as db_session +from rack import exception +from rack.openstack.common.gettextutils import _ + +INIT_VERSION = 0 +_REPOSITORY = None + +get_engine = db_session.get_engine + + +def db_sync(version=None): + if version is not None: + try: + version = int(version) + except ValueError: + raise exception.RackException(_("version should be an integer")) + + current_version = db_version() + repository = _find_migrate_repo() + if version is None or version > current_version: + return versioning_api.upgrade(get_engine(), repository, version) + else: + return versioning_api.downgrade(get_engine(), repository, + version) + + +def db_version(): + repository = _find_migrate_repo() + try: + return versioning_api.db_version(get_engine(), repository) + except versioning_exceptions.DatabaseNotControlledError: + meta = sqlalchemy.MetaData() + engine = get_engine() + meta.reflect(bind=engine) + tables = meta.tables + if len(tables) == 0: + db_version_control(INIT_VERSION) + return versioning_api.db_version(get_engine(), repository) + else: + # Some pre-Essex DB's may not be version controlled. + # Require them to upgrade using Essex first. + raise exception.RackException( + _("Upgrade DB using Essex release first.")) + + +def db_initial_version(): + return INIT_VERSION + + +def db_version_control(version=None): + repository = _find_migrate_repo() + versioning_api.version_control(get_engine(), repository, version) + return version + + +def _find_migrate_repo(): + """Get the path for the migrate repository.""" + global _REPOSITORY + path = os.path.join(os.path.abspath(os.path.dirname(__file__)), + 'migrate_repo') + assert os.path.exists(path) + if _REPOSITORY is None: + _REPOSITORY = Repository(path) + return _REPOSITORY diff --git a/rack/db/sqlalchemy/models.py b/rack/db/sqlalchemy/models.py new file mode 100644 index 0000000..1cbba82 --- /dev/null +++ b/rack/db/sqlalchemy/models.py @@ -0,0 +1,184 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from rack.openstack.common.db.sqlalchemy import models + +from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, Text, schema +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.orm import relationship + +Base = declarative_base() + +class Group(models.SoftDeleteMixin, + models.TimestampMixin, + models.ModelBase, + Base): + + __tablename__ = 'groups' + securitygroups = relationship("Securitygroup") + processes = relationship("Process") + + gid = Column(String(36), primary_key=True) + user_id = Column(String(255)) + project_id = Column(String(255)) + display_name = Column(String(255)) + display_description = Column(String(255)) + status = Column(String(255)) + + +class Service(models.SoftDeleteMixin, + models.TimestampMixin, + models.ModelBase, + Base): + """Represents a running service on a host.""" + + __tablename__ = 'services' + __table_args__ = ( + schema.UniqueConstraint("host", "topic", "deleted", + name="uniq_services0host0topic0deleted"), + schema.UniqueConstraint("host", "binary", "deleted", + name="uniq_services0host0binary0deleted") + ) + + id = Column(Integer, primary_key=True) + host = Column(String(255)) + binary = Column(String(255)) + topic = Column(String(255)) + report_count = Column(Integer, nullable=False, default=0) + disabled = Column(Boolean, default=False) + disabled_reason = Column(String(255)) + + +class Network(models.SoftDeleteMixin, + models.TimestampMixin, + models.ModelBase, + Base): + + __tablename__ = 'networks' + + network_id = Column(String(255), primary_key=True) + gid = Column(String(255)) + neutron_network_id = Column(String(255)) + is_admin = Column(Boolean, default=False) + subnet = Column(String(255)) + ext_router = Column(String(255)) + user_id = Column(String(255)) + project_id = Column(String(255)) + display_name = Column(String(255)) + status = Column(String(255)) + + +class Keypair(models.SoftDeleteMixin, + models.TimestampMixin, + models.ModelBase, + Base): + + __tablename__ = 'keypairs' + + keypair_id = Column(String(36), primary_key=True) + gid = Column(String(36), ForeignKey('groups.gid'), nullable=False) + user_id = Column(String(255)) + project_id = Column(String(255)) + nova_keypair_id = Column(String(255)) + private_key = Column(Text) + display_name = Column(String(255)) + is_default = Column(Boolean, default=False) + status = Column(String(255)) + + +class Securitygroup(models.SoftDeleteMixin, + models.TimestampMixin, + models.ModelBase, + Base): + + __tablename__ = 'securitygroups' + + deleted = Column(Integer, nullable=False, default=0) + securitygroup_id = Column(String(36), primary_key=True) + gid = Column(String(36), ForeignKey('groups.gid')) + neutron_securitygroup_id = Column(String(36)) + is_default = Column(Boolean, default=False) + user_id = Column(String(255)) + project_id = Column(String(255)) + display_name = Column(String(255)) + status = Column(String(255)) + + group = relationship("Group", + foreign_keys=gid, + primaryjoin='and_(' + 'Securitygroup.gid == Group.gid,' + 'Securitygroup.deleted == 0,' + 'Group.deleted == 0)') + + +class Process(models.SoftDeleteMixin, + models.TimestampMixin, + models.ModelBase, + Base): + + __tablename__ = 'processes' + + + deleted = Column(Integer, nullable=False, default=0) + gid = Column(String(36), ForeignKey('groups.gid'), nullable=False) + keypair_id = Column(String(36), ForeignKey('keypairs.keypair_id')) + pid = Column(String(36), primary_key=True) + ppid = Column(String(36), ForeignKey('processes.pid')) + nova_instance_id = Column(String(36)) + 
glance_image_id = Column(String(36), nullable=False) + nova_flavor_id = Column(Integer, nullable=False) + user_id = Column(String(255), nullable=False) + project_id = Column(String(255), nullable=False) + display_name = Column(String(255), nullable=False) + status = Column(String(255), nullable=False) + + group = relationship("Group", + foreign_keys=gid, + primaryjoin='and_(' + 'Process.gid == Group.gid,' + 'Process.deleted == 0,' + 'Group.deleted == 0)') + + securitygroups = relationship("Securitygroup", + secondary="processes_securitygroups", + primaryjoin='and_(' + 'Process.pid == ProcessSecuritygroup.pid,' + 'Process.deleted == 0)', + secondaryjoin='and_(' + 'Securitygroup.securitygroup_id == ProcessSecuritygroup.securitygroup_id,' + 'Securitygroup.deleted == 0)', + backref="processes") + + networks = relationship("Network", + secondary="processes_networks", + primaryjoin='and_(' + 'Process.pid == ProcessNetwork.pid,' + 'Process.deleted == 0)', + secondaryjoin='and_(' + 'Network.network_id == ProcessNetwork.network_id,' + 'Network.deleted == 0)', + backref="processes") + +class ProcessSecuritygroup(models.ModelBase,Base): + + __tablename__ = 'processes_securitygroups' + + pid = Column(String(36), ForeignKey('processes.pid'), nullable=False, primary_key=True) + securitygroup_id = Column(String(36), ForeignKey('securitygroups.securitygroup_id'), nullable=False, primary_key=True) + +class ProcessNetwork(models.ModelBase,Base): + + __tablename__ = 'processes_networks' + + pid = Column(String(36), ForeignKey('processes.pid'), nullable=False, primary_key=True) + network_id = Column(String(36), ForeignKey('networks.network_id'), nullable=False, primary_key=True) diff --git a/rack/db/sqlalchemy/types.py b/rack/db/sqlalchemy/types.py new file mode 100644 index 0000000..4e8cb74 --- /dev/null +++ b/rack/db/sqlalchemy/types.py @@ -0,0 +1,61 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Custom SQLAlchemy types.""" + +from sqlalchemy.dialects import postgresql +from sqlalchemy import types + +from rack import utils + + +class IPAddress(types.TypeDecorator): + """An SQLAlchemy type representing an IP-address.""" + + impl = types.String + + def load_dialect_impl(self, dialect): + if dialect.name == 'postgresql': + return dialect.type_descriptor(postgresql.INET()) + else: + return dialect.type_descriptor(types.String(39)) + + def process_bind_param(self, value, dialect): + """Process/Formats the value before insert it into the db.""" + if dialect.name == 'postgresql': + return value + # NOTE(maurosr): The purpose here is to convert ipv6 to the shortened + # form, not validate it. 
+ elif utils.is_valid_ipv6(value): + return utils.get_shortened_ipv6(value) + return value + + +class CIDR(types.TypeDecorator): + """An SQLAlchemy type representing a CIDR definition.""" + + impl = types.String + + def load_dialect_impl(self, dialect): + if dialect.name == 'postgresql': + return dialect.type_descriptor(postgresql.INET()) + else: + return dialect.type_descriptor(types.String(43)) + + def process_bind_param(self, value, dialect): + """Process/Formats the value before insert it into the db.""" + # NOTE(sdague): normalize all the inserts + if utils.is_valid_ipv6_cidr(value): + return utils.get_shortened_ipv6_cidr(value) + return value diff --git a/rack/db/sqlalchemy/utils.py b/rack/db/sqlalchemy/utils.py new file mode 100644 index 0000000..6e506ed --- /dev/null +++ b/rack/db/sqlalchemy/utils.py @@ -0,0 +1,606 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import re + +from migrate.changeset import UniqueConstraint, ForeignKeyConstraint +from sqlalchemy import Boolean +from sqlalchemy import CheckConstraint +from sqlalchemy import Column +from sqlalchemy.engine import reflection +from sqlalchemy.exc import OperationalError +from sqlalchemy.exc import ProgrammingError +from sqlalchemy.ext.compiler import compiles +from sqlalchemy import func +from sqlalchemy import Index +from sqlalchemy import Integer +from sqlalchemy import MetaData +from sqlalchemy import schema +from sqlalchemy.sql.expression import literal_column +from sqlalchemy.sql.expression import UpdateBase +from sqlalchemy.sql import select +from sqlalchemy import String +from sqlalchemy import Table +from sqlalchemy.types import NullType + +from rack.db.sqlalchemy import api as db +from rack import exception +from rack.openstack.common.gettextutils import _ +from rack.openstack.common import log as logging +from rack.openstack.common import timeutils + + +LOG = logging.getLogger(__name__) + + +def get_table(engine, name): + """Returns an sqlalchemy table dynamically from db. + + Needed because the models don't work for us in migrations + as models will be far out of sync with the current data. 
+ """ + metadata = MetaData() + metadata.bind = engine + return Table(name, metadata, autoload=True) + + +class InsertFromSelect(UpdateBase): + def __init__(self, table, select): + self.table = table + self.select = select + + +@compiles(InsertFromSelect) +def visit_insert_from_select(element, compiler, **kw): + return "INSERT INTO %s %s" % ( + compiler.process(element.table, asfrom=True), + compiler.process(element.select)) + + +class DeleteFromSelect(UpdateBase): + def __init__(self, table, select, column): + self.table = table + self.select = select + self.column = column + + +@compiles(DeleteFromSelect) +def visit_delete_from_select(element, compiler, **kw): + return "DELETE FROM %s WHERE %s in (SELECT T1.%s FROM (%s) as T1)" % ( + compiler.process(element.table, asfrom=True), + compiler.process(element.column), + element.column.name, + compiler.process(element.select)) + + +def _get_not_supported_column(col_name_col_instance, column_name): + try: + column = col_name_col_instance[column_name] + except Exception: + msg = _("Please specify column %s in col_name_col_instance " + "param. It is required because column has unsupported " + "type by sqlite).") + raise exception.RackException(msg % column_name) + + if not isinstance(column, Column): + msg = _("col_name_col_instance param has wrong type of " + "column instance for column %s It should be instance " + "of sqlalchemy.Column.") + raise exception.RackException(msg % column_name) + return column + + +def _get_unique_constraints_in_sqlite(migrate_engine, table_name): + regexp = "CONSTRAINT (\w+) UNIQUE \(([^\)]+)\)" + + meta = MetaData(bind=migrate_engine) + table = Table(table_name, meta, autoload=True) + + sql_data = migrate_engine.execute( + """ + SELECT sql + FROM + sqlite_master + WHERE + type = 'table' AND + name = :table_name; + """, + table_name=table_name + ).fetchone()[0] + + uniques = set([ + schema.UniqueConstraint( + *[getattr(table.c, c.strip(' "')) + for c in cols.split(",")], name=name + ) + for name, cols in re.findall(regexp, sql_data) + ]) + + return uniques + + +def _drop_unique_constraint_in_sqlite(migrate_engine, table_name, uc_name, + **col_name_col_instance): + insp = reflection.Inspector.from_engine(migrate_engine) + meta = MetaData(bind=migrate_engine) + + table = Table(table_name, meta, autoload=True) + columns = [] + for column in table.columns: + if isinstance(column.type, NullType): + new_column = _get_not_supported_column(col_name_col_instance, + column.name) + columns.append(new_column) + else: + columns.append(column.copy()) + + uniques = _get_unique_constraints_in_sqlite(migrate_engine, table_name) + table.constraints.update(uniques) + + constraints = [constraint for constraint in table.constraints + if not constraint.name == uc_name and + not isinstance(constraint, schema.ForeignKeyConstraint)] + + new_table = Table(table_name + "__tmp__", meta, *(columns + constraints)) + new_table.create() + + indexes = [] + for index in insp.get_indexes(table_name): + column_names = [new_table.c[c] for c in index['column_names']] + indexes.append(Index(index["name"], + *column_names, + unique=index["unique"])) + f_keys = [] + for fk in insp.get_foreign_keys(table_name): + refcolumns = [fk['referred_table'] + '.' 
+ col + for col in fk['referred_columns']] + f_keys.append(ForeignKeyConstraint(fk['constrained_columns'], + refcolumns, table=new_table, name=fk['name'])) + + ins = InsertFromSelect(new_table, table.select()) + migrate_engine.execute(ins) + table.drop() + + [index.create(migrate_engine) for index in indexes] + for fkey in f_keys: + fkey.create() + new_table.rename(table_name) + + +def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns, + **col_name_col_instance): + """This method drops UC from table and works for mysql, postgresql and + sqlite. In mysql and postgresql we are able to use "alter table" + construction. In sqlite is only one way to drop UC: + 1) Create new table with same columns, indexes and constraints + (except one that we want to drop). + 2) Copy data from old table to new. + 3) Drop old table. + 4) Rename new table to the name of old table. + + :param migrate_engine: sqlalchemy engine + :param table_name: name of table that contains uniq constraint. + :param uc_name: name of uniq constraint that will be dropped. + :param columns: columns that are in uniq constraint. + :param col_name_col_instance: contains pair column_name=column_instance. + column_instance is instance of Column. These params + are required only for columns that have unsupported + types by sqlite. For example BigInteger. + """ + if migrate_engine.name == "sqlite": + _drop_unique_constraint_in_sqlite(migrate_engine, table_name, uc_name, + **col_name_col_instance) + else: + meta = MetaData() + meta.bind = migrate_engine + t = Table(table_name, meta, autoload=True) + uc = UniqueConstraint(*columns, table=t, name=uc_name) + uc.drop() + + +def drop_old_duplicate_entries_from_table(migrate_engine, table_name, + use_soft_delete, *uc_column_names): + """This method is used to drop all old rows that have the same values for + columns in uc_columns. + """ + meta = MetaData() + meta.bind = migrate_engine + + table = Table(table_name, meta, autoload=True) + columns_for_group_by = [table.c[name] for name in uc_column_names] + + columns_for_select = [func.max(table.c.id)] + columns_for_select.extend(list(columns_for_group_by)) + + duplicated_rows_select = select(columns_for_select, + group_by=columns_for_group_by, + having=func.count(table.c.id) > 1) + + for row in migrate_engine.execute(duplicated_rows_select): + # NOTE(boris-42): Do not remove row that has the biggest ID. + delete_condition = table.c.id != row[0] + for name in uc_column_names: + delete_condition &= table.c[name] == row[name] + + rows_to_delete_select = select([table.c.id]).where(delete_condition) + for row in migrate_engine.execute(rows_to_delete_select).fetchall(): + LOG.info(_("Deleted duplicated row with id: %(id)s from table: " + "%(table)s") % dict(id=row[0], table=table_name)) + + if use_soft_delete: + delete_statement = table.update().\ + where(delete_condition).\ + values({ + 'deleted': literal_column('id'), + 'updated_at': literal_column('updated_at'), + 'deleted_at': timeutils.utcnow() + }) + else: + delete_statement = table.delete().where(delete_condition) + migrate_engine.execute(delete_statement) + + +def check_shadow_table(migrate_engine, table_name): + """This method checks that table with ``table_name`` and + corresponding shadow table have same columns. 
+ """ + meta = MetaData() + meta.bind = migrate_engine + + table = Table(table_name, meta, autoload=True) + shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, meta, + autoload=True) + + columns = dict([(c.name, c) for c in table.columns]) + shadow_columns = dict([(c.name, c) for c in shadow_table.columns]) + + for name, column in columns.iteritems(): + if name not in shadow_columns: + raise exception.RackException( + _("Missing column %(table)s.%(column)s in shadow table") + % {'column': name, 'table': shadow_table.name}) + shadow_column = shadow_columns[name] + + if not isinstance(shadow_column.type, type(column.type)): + raise exception.RackException( + _("Different types in %(table)s.%(column)s and shadow table: " + "%(c_type)s %(shadow_c_type)s") + % {'column': name, 'table': table.name, + 'c_type': column.type, + 'shadow_c_type': shadow_column.type}) + + for name, column in shadow_columns.iteritems(): + if name not in columns: + raise exception.RackException( + _("Extra column %(table)s.%(column)s in shadow table") + % {'column': name, 'table': shadow_table.name}) + return True + + +def create_shadow_table(migrate_engine, table_name=None, table=None, + **col_name_col_instance): + """This method create shadow table for table with name ``table_name`` + or table instance ``table``. + :param table_name: Autoload table with this name and create shadow table + :param table: Autoloaded table, so just create corresponding shadow table. + :param col_name_col_instance: contains pair column_name=column_instance. + column_instance is instance of Column. These params + are required only for columns that have unsupported + types by sqlite. For example BigInteger. + + :returns: The created shadow_table object. + """ + meta = MetaData(bind=migrate_engine) + + if table_name is None and table is None: + raise exception.RackException(_("Specify `table_name` or `table` " + "param")) + if not (table_name is None or table is None): + raise exception.RackException(_("Specify only one param `table_name` " + "`table`")) + + if table is None: + table = Table(table_name, meta, autoload=True) + + columns = [] + for column in table.columns: + if isinstance(column.type, NullType): + new_column = _get_not_supported_column(col_name_col_instance, + column.name) + columns.append(new_column) + else: + columns.append(column.copy()) + + shadow_table_name = db._SHADOW_TABLE_PREFIX + table.name + shadow_table = Table(shadow_table_name, meta, *columns, + mysql_engine='InnoDB') + try: + shadow_table.create() + return shadow_table + except (OperationalError, ProgrammingError): + LOG.info(repr(shadow_table)) + LOG.exception(_('Exception while creating table.')) + raise exception.ShadowTableExists(name=shadow_table_name) + except Exception: + LOG.info(repr(shadow_table)) + LOG.exception(_('Exception while creating table.')) + + +def _get_default_deleted_value(table): + if isinstance(table.c.id.type, Integer): + return 0 + if isinstance(table.c.id.type, String): + return "" + raise exception.RackException(_("Unsupported id columns type")) + + +def _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes): + table = get_table(migrate_engine, table_name) + + insp = reflection.Inspector.from_engine(migrate_engine) + real_indexes = insp.get_indexes(table_name) + existing_index_names = dict([(index['name'], index['column_names']) + for index in real_indexes]) + + # NOTE(boris-42): Restore indexes on `deleted` column + for index in indexes: + if 'deleted' not in index['column_names']: + continue + name = 
index['name'] + if name in existing_index_names: + column_names = [table.c[c] for c in existing_index_names[name]] + old_index = Index(name, *column_names, unique=index["unique"]) + old_index.drop(migrate_engine) + + column_names = [table.c[c] for c in index['column_names']] + new_index = Index(index["name"], *column_names, unique=index["unique"]) + new_index.create(migrate_engine) + + +def change_deleted_column_type_to_boolean(migrate_engine, table_name, + **col_name_col_instance): + if migrate_engine.name == "sqlite": + return _change_deleted_column_type_to_boolean_sqlite(migrate_engine, + table_name, + **col_name_col_instance) + insp = reflection.Inspector.from_engine(migrate_engine) + indexes = insp.get_indexes(table_name) + + table = get_table(migrate_engine, table_name) + + old_deleted = Column('old_deleted', Boolean, default=False) + old_deleted.create(table, populate_default=False) + + table.update().\ + where(table.c.deleted == table.c.id).\ + values(old_deleted=True).\ + execute() + + table.c.deleted.drop() + table.c.old_deleted.alter(name="deleted") + + _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes) + + +def _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name, + **col_name_col_instance): + insp = reflection.Inspector.from_engine(migrate_engine) + table = get_table(migrate_engine, table_name) + + columns = [] + for column in table.columns: + column_copy = None + if column.name != "deleted": + if isinstance(column.type, NullType): + column_copy = _get_not_supported_column(col_name_col_instance, + column.name) + else: + column_copy = column.copy() + else: + column_copy = Column('deleted', Boolean, default=0) + columns.append(column_copy) + + constraints = [constraint.copy() for constraint in table.constraints] + + meta = MetaData(bind=migrate_engine) + new_table = Table(table_name + "__tmp__", meta, + *(columns + constraints)) + new_table.create() + + indexes = [] + for index in insp.get_indexes(table_name): + column_names = [new_table.c[c] for c in index['column_names']] + indexes.append(Index(index["name"], *column_names, + unique=index["unique"])) + + c_select = [] + for c in table.c: + if c.name != "deleted": + c_select.append(c) + else: + c_select.append(table.c.deleted == table.c.id) + + ins = InsertFromSelect(new_table, select(c_select)) + migrate_engine.execute(ins) + + table.drop() + [index.create(migrate_engine) for index in indexes] + + new_table.rename(table_name) + new_table.update().\ + where(new_table.c.deleted == new_table.c.id).\ + values(deleted=True).\ + execute() + + +def change_deleted_column_type_to_id_type(migrate_engine, table_name, + **col_name_col_instance): + if migrate_engine.name == "sqlite": + return _change_deleted_column_type_to_id_type_sqlite(migrate_engine, + table_name, + **col_name_col_instance) + insp = reflection.Inspector.from_engine(migrate_engine) + indexes = insp.get_indexes(table_name) + + table = get_table(migrate_engine, table_name) + + new_deleted = Column('new_deleted', table.c.id.type, + default=_get_default_deleted_value(table)) + new_deleted.create(table, populate_default=True) + + table.update().\ + where(table.c.deleted == True).\ + values(new_deleted=table.c.id).\ + execute() + table.c.deleted.drop() + table.c.new_deleted.alter(name="deleted") + + _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes) + + +def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name, + **col_name_col_instance): + # NOTE(boris-42): sqlaclhemy-migrate can't drop column 
with check + # constraints in sqlite DB and our `deleted` column has + # 2 check constraints. So there is only one way to remove + # these constraints: + # 1) Create new table with the same columns, constraints + # and indexes. (except deleted column). + # 2) Copy all data from old to new table. + # 3) Drop old table. + # 4) Rename new table to old table name. + insp = reflection.Inspector.from_engine(migrate_engine) + meta = MetaData(bind=migrate_engine) + table = Table(table_name, meta, autoload=True) + default_deleted_value = _get_default_deleted_value(table) + + columns = [] + for column in table.columns: + column_copy = None + if column.name != "deleted": + if isinstance(column.type, NullType): + column_copy = _get_not_supported_column(col_name_col_instance, + column.name) + else: + column_copy = column.copy() + else: + column_copy = Column('deleted', table.c.id.type, + default=default_deleted_value) + columns.append(column_copy) + + def is_deleted_column_constraint(constraint): + # NOTE(boris-42): There is no other way to check is CheckConstraint + # associated with deleted column. + if not isinstance(constraint, CheckConstraint): + return False + sqltext = str(constraint.sqltext) + # NOTE(I159): when the type of column `deleted` is changed from boolean + # to int, the corresponding CHECK constraint is dropped too. But + # starting from SQLAlchemy version 0.8.3, those CHECK constraints + # aren't dropped anymore. So despite the fact that column deleted is + # of type int now, we still restrict its values to be either 0 or 1. + constraint_markers = ( + "deleted in (0, 1)", + "deleted IN (:deleted_1, :deleted_2)", + "deleted IN (:param_1, :param_2)" + ) + return any(sqltext.endswith(marker) for marker in constraint_markers) + + constraints = [] + for constraint in table.constraints: + if not is_deleted_column_constraint(constraint): + constraints.append(constraint.copy()) + + new_table = Table(table_name + "__tmp__", meta, + *(columns + constraints)) + new_table.create() + + indexes = [] + for index in insp.get_indexes(table_name): + column_names = [new_table.c[c] for c in index['column_names']] + indexes.append(Index(index["name"], *column_names, + unique=index["unique"])) + + ins = InsertFromSelect(new_table, table.select()) + migrate_engine.execute(ins) + + table.drop() + [index.create(migrate_engine) for index in indexes] + + new_table.rename(table_name) + new_table.update().\ + where(new_table.c.deleted == True).\ + values(deleted=new_table.c.id).\ + execute() + + # NOTE(boris-42): Fix value of deleted column: False -> "" or 0. 
+ new_table.update().\ + where(new_table.c.deleted == False).\ + values(deleted=default_deleted_value).\ + execute() + + +def _index_exists(migrate_engine, table_name, index_name): + inspector = reflection.Inspector.from_engine(migrate_engine) + indexes = inspector.get_indexes(table_name) + index_names = [index['name'] for index in indexes] + + return index_name in index_names + + +def _add_index(migrate_engine, table, index_name, idx_columns): + index = Index( + index_name, *[getattr(table.c, col) for col in idx_columns] + ) + index.create() + + +def _drop_index(migrate_engine, table, index_name, idx_columns): + if _index_exists(migrate_engine, table.name, index_name): + index = Index( + index_name, *[getattr(table.c, col) for col in idx_columns] + ) + index.drop() + + +def _change_index_columns(migrate_engine, table, index_name, + new_columns, old_columns): + _drop_index(migrate_engine, table, index_name, old_columns) + _add_index(migrate_engine, table, index_name, new_columns) + + +def modify_indexes(migrate_engine, data, upgrade=True): + if migrate_engine.name == 'sqlite': + return + + meta = MetaData() + meta.bind = migrate_engine + + for table_name, indexes in data.iteritems(): + table = Table(table_name, meta, autoload=True) + + for index_name, old_columns, new_columns in indexes: + if not upgrade: + new_columns, old_columns = old_columns, new_columns + + if migrate_engine.name == 'postgresql': + if upgrade: + _add_index(migrate_engine, table, index_name, new_columns) + else: + _drop_index(migrate_engine, table, index_name, old_columns) + elif migrate_engine.name == 'mysql': + _change_index_columns(migrate_engine, table, index_name, + new_columns, old_columns) + else: + raise ValueError('Unsupported DB %s' % migrate_engine.name) diff --git a/rack/debugger.py b/rack/debugger.py new file mode 100644 index 0000000..709935d --- /dev/null +++ b/rack/debugger.py @@ -0,0 +1,75 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import sys + + +def enabled(): + return ('--remote_debug-host' in sys.argv and + '--remote_debug-port' in sys.argv) + + +def register_cli_opts(): + from oslo.config import cfg + + cli_opts = [ + cfg.StrOpt('host', + help='Debug host (IP or name) to connect. Note ' + 'that using the remote debug option changes how ' + 'Rack uses the eventlet library to support async IO. ' + 'This could result in failures that do not occur ' + 'under normal operation. Use at your own risk.'), + + cfg.IntOpt('port', + help='Debug port to connect. Note ' + 'that using the remote debug option changes how ' + 'Rack uses the eventlet library to support async IO. ' + 'This could result in failures that do not occur ' + 'under normal operation. 
Use at your own risk.') + + ] + + cfg.CONF.register_cli_opts(cli_opts, 'remote_debug') + + +def init(): + from oslo.config import cfg + CONF = cfg.CONF + + # NOTE(markmc): gracefully handle the CLI options not being registered + if 'remote_debug' not in CONF: + return + + if not (CONF.remote_debug.host and CONF.remote_debug.port): + return + + from rack.openstack.common.gettextutils import _ + from rack.openstack.common import log as logging + LOG = logging.getLogger(__name__) + + LOG.debug(_('Listening on %(host)s:%(port)s for debug connection'), + {'host': CONF.remote_debug.host, + 'port': CONF.remote_debug.port}) + + from pydev import pydevd + pydevd.settrace(host=CONF.remote_debug.host, + port=CONF.remote_debug.port, + stdoutToServer=False, + stderrToServer=False) + + LOG.warn(_('WARNING: Using the remote debug option changes how ' + 'Rack uses the eventlet library to support async IO. This ' + 'could result in failures that do not occur under normal ' + 'operation. Use at your own risk.')) diff --git a/rack/exception.py b/rack/exception.py new file mode 100644 index 0000000..245ea08 --- /dev/null +++ b/rack/exception.py @@ -0,0 +1,1598 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import functools +import sys + +from oslo.config import cfg +import webob.exc + +from rack.openstack.common import excutils +from rack.openstack.common.gettextutils import _ +from rack.openstack.common import log as logging +from rack import safe_utils + +LOG = logging.getLogger(__name__) + +exc_log_opts = [ + cfg.BoolOpt('fatal_exception_format_errors', + default=False, + help='Make exception message format errors fatal'), +] + +CONF = cfg.CONF +CONF.register_opts(exc_log_opts) + + +class ConvertedException(webob.exc.WSGIHTTPException): + def __init__(self, code=0, title="", explanation=""): + self.code = code + self.title = title + self.explanation = explanation + super(ConvertedException, self).__init__() + + +def _cleanse_dict(original): + """Strip all admin_password, new_pass, rescue_pass keys from a dict.""" + return dict((k, v) for k, v in original.iteritems() if not "_pass" in k) + + +def wrap_exception(notifier=None, get_notifier=None): + """This decorator wraps a method to catch any exceptions that may + get thrown. It logs the exception as well as optionally sending + it to the notification system. + """ + def inner(f): + def wrapped(self, context, *args, **kw): + # Don't store self or context in the payload, it now seems to + # contain confidential information. + try: + return f(self, context, *args, **kw) + except Exception as e: + with excutils.save_and_reraise_exception(): + if notifier or get_notifier: + payload = dict(exception=e) + call_dict = safe_utils.getcallargs(f, context, + *args, **kw) + cleansed = _cleanse_dict(call_dict) + payload.update({'args': cleansed}) + + # If f has multiple decorators, they must use + # functools.wraps to ensure the name is + # propagated. 
+ event_type = f.__name__ + + (notifier or get_notifier()).error(context, + event_type, + payload) + + return functools.wraps(f)(wrapped) + return inner + + +class RackException(Exception): + """Base Rack Exception + + To correctly use this class, inherit from it and define + a 'msg_fmt' property. That msg_fmt will get printf'd + with the keyword arguments provided to the constructor. + + """ + msg_fmt = _("An unknown exception occurred.") + code = 500 + headers = {} + safe = False + + def __init__(self, message=None, **kwargs): + self.kwargs = kwargs + + if 'code' not in self.kwargs: + try: + self.kwargs['code'] = self.code + except AttributeError: + pass + + if not message: + try: + message = self.msg_fmt % kwargs + + except Exception: + exc_info = sys.exc_info() + # kwargs doesn't match a variable in the message + # log the issue and the kwargs + LOG.exception(_('Exception in string format operation')) + for name, value in kwargs.iteritems(): + LOG.error("%s: %s" % (name, value)) + + if CONF.fatal_exception_format_errors: + raise exc_info[0], exc_info[1], exc_info[2] + else: + # at least get the core message out if something happened + message = self.msg_fmt + + super(RackException, self).__init__(message) + + def format_message(self): + # NOTE(mrodden): use the first argument to the python Exception object + # which should be our full RackException message, (see __init__) + return self.args[0] + + +class EncryptionFailure(RackException): + msg_fmt = _("Failed to encrypt text: %(reason)s") + + +class DecryptionFailure(RackException): + msg_fmt = _("Failed to decrypt text: %(reason)s") + + +class VirtualInterfaceCreateException(RackException): + msg_fmt = _("Virtual Interface creation failed") + + +class VirtualInterfaceMacAddressException(RackException): + msg_fmt = _("Creation of virtual interface with " + "unique mac address failed") + + +class GlanceConnectionFailed(RackException): + msg_fmt = _("Connection to glance host %(host)s:%(port)s failed: " + "%(reason)s") + + +class NotAuthorized(RackException): + ec2_code = 'AuthFailure' + msg_fmt = _("Not authorized.") + code = 403 + + +class AdminRequired(NotAuthorized): + msg_fmt = _("User does not have admin privileges") + + +class PolicyNotAuthorized(NotAuthorized): + msg_fmt = _("Policy doesn't allow %(action)s to be performed.") + + +class ImageNotActive(RackException): + # NOTE(jruzicka): IncorrectState is used for volumes only in EC2, + # but it still seems like the most appropriate option. 
+ ec2_code = 'IncorrectState' + msg_fmt = _("Image %(image_id)s is not active.") + + +class ImageNotAuthorized(RackException): + msg_fmt = _("Not authorized for image %(image_id)s.") + + +class Invalid(RackException): + msg_fmt = _("Unacceptable parameters.") + code = 400 + + +class InvalidBDM(Invalid): + msg_fmt = _("Block Device Mapping is Invalid.") + + +class InvalidBDMSnapshot(InvalidBDM): + msg_fmt = _("Block Device Mapping is Invalid: " + "failed to get snapshot %(id)s.") + + +class InvalidBDMVolume(InvalidBDM): + msg_fmt = _("Block Device Mapping is Invalid: " + "failed to get volume %(id)s.") + + +class InvalidBDMImage(InvalidBDM): + msg_fmt = _("Block Device Mapping is Invalid: " + "failed to get image %(id)s.") + + +class InvalidBDMBootSequence(InvalidBDM): + msg_fmt = _("Block Device Mapping is Invalid: " + "Boot sequence for the instance " + "and image/block device mapping " + "combination is not valid.") + + +class InvalidBDMLocalsLimit(InvalidBDM): + msg_fmt = _("Block Device Mapping is Invalid: " + "You specified more local devices than the " + "limit allows") + + +class InvalidBDMEphemeralSize(InvalidBDM): + msg_fmt = _("Ephemeral disks requested are larger than " + "the instance type allows.") + + +class InvalidBDMSwapSize(InvalidBDM): + msg_fmt = _("Swap drive requested is larger than instance type allows.") + + +class InvalidBDMFormat(InvalidBDM): + msg_fmt = _("Block Device Mapping is Invalid: " + "%(details)s") + + +class InvalidBDMForLegacy(InvalidBDM): + msg_fmt = _("Block Device Mapping cannot " + "be converted to legacy format. ") + + +class InvalidAttribute(Invalid): + msg_fmt = _("Attribute not supported: %(attr)s") + + +class ValidationError(Invalid): + msg_fmt = "%(detail)s" + + +class VolumeUnattached(Invalid): + ec2_code = 'IncorrectState' + msg_fmt = _("Volume %(volume_id)s is not attached to anything") + + +class VolumeNotCreated(RackException): + msg_fmt = _("Volume %(volume_id)s did not finish being created" + " even after we waited %(seconds)s seconds or %(attempts)s" + " attempts.") + + +class InvalidKeypair(Invalid): + ec2_code = 'InvalidKeyPair.Format' + msg_fmt = _("Keypair data is invalid: %(reason)s") + + +class InvalidRequest(Invalid): + msg_fmt = _("The request is invalid.") + + +class InvalidInput(Invalid): + msg_fmt = _("Invalid input received: %(reason)s") + + +class InvalidVolume(Invalid): + ec2_code = 'UnsupportedOperation' + msg_fmt = _("Invalid volume: %(reason)s") + + +class InvalidVolumeAccessMode(Invalid): + msg_fmt = _("Invalid volume access mode") + ": %(access_mode)s" + + +class InvalidMetadata(Invalid): + msg_fmt = _("Invalid metadata: %(reason)s") + + +class InvalidMetadataSize(Invalid): + msg_fmt = _("Invalid metadata size: %(reason)s") + + +class InvalidPortRange(Invalid): + ec2_code = 'InvalidParameterValue' + msg_fmt = _("Invalid port range %(from_port)s:%(to_port)s. %(msg)s") + + +class InvalidIpProtocol(Invalid): + msg_fmt = _("Invalid IP protocol %(protocol)s.") + + +class InvalidContentType(Invalid): + msg_fmt = _("Invalid content type %(content_type)s.") + + +class InvalidCidr(Invalid): + msg_fmt = _("Invalid cidr %(cidr)s.") + + +class InvalidUnicodeParameter(Invalid): + msg_fmt = _("Invalid Parameter: " + "Unicode is not supported by the current database.") + + +class InvalidParameterValue(Invalid): + ec2_code = 'InvalidParameterValue' + msg_fmt = _("%(err)s") + + +class InvalidAggregateAction(Invalid): + msg_fmt = _("Cannot perform action '%(action)s' on aggregate " + "%(aggregate_id)s. 
Reason: %(reason)s.") + + +class InvalidGroup(Invalid): + msg_fmt = _("Group not valid. Reason: %(reason)s") + + +class InvalidSortKey(Invalid): + msg_fmt = _("Sort key supplied was not valid.") + + +class InstanceInvalidState(Invalid): + msg_fmt = _("Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot " + "%(method)s while the instance is in this state.") + + +class InstanceNotRunning(Invalid): + msg_fmt = _("Instance %(instance_id)s is not running.") + + +class InstanceNotInRescueMode(Invalid): + msg_fmt = _("Instance %(instance_id)s is not in rescue mode") + + +class InstanceNotRescuable(Invalid): + msg_fmt = _("Instance %(instance_id)s cannot be rescued: %(reason)s") + + +class InstanceNotReady(Invalid): + msg_fmt = _("Instance %(instance_id)s is not ready") + + +class InstanceSuspendFailure(Invalid): + msg_fmt = _("Failed to suspend instance: %(reason)s") + + +class InstanceResumeFailure(Invalid): + msg_fmt = _("Failed to resume instance: %(reason)s") + + +class InstancePowerOnFailure(Invalid): + msg_fmt = _("Failed to power on instance: %(reason)s") + + +class InstancePowerOffFailure(Invalid): + msg_fmt = _("Failed to power off instance: %(reason)s") + + +class InstanceRebootFailure(Invalid): + msg_fmt = _("Failed to reboot instance: %(reason)s") + + +class InstanceTerminationFailure(Invalid): + msg_fmt = _("Failed to terminate instance: %(reason)s") + + +class InstanceDeployFailure(Invalid): + msg_fmt = _("Failed to deploy instance: %(reason)s") + + +class MultiplePortsNotApplicable(Invalid): + msg_fmt = _("Failed to launch instances: %(reason)s") + + +class ServiceUnavailable(Invalid): + msg_fmt = _("Service is unavailable at this time.") + + +class ComputeResourcesUnavailable(ServiceUnavailable): + msg_fmt = _("Insufficient compute resources: %(reason)s.") + + +class HypervisorUnavailable(RackException): + msg_fmt = _("Connection to the hypervisor is broken on host: %(host)s") + + +class ComputeServiceUnavailable(ServiceUnavailable): + msg_fmt = _("Compute service of %(host)s is unavailable at this time.") + + +class ComputeServiceInUse(RackException): + msg_fmt = _("Compute service of %(host)s is still in use.") + + +class UnableToMigrateToSelf(Invalid): + msg_fmt = _("Unable to migrate instance (%(instance_id)s) " + "to current host (%(host)s).") + + +class InvalidHypervisorType(Invalid): + msg_fmt = _("The supplied hypervisor type of is invalid.") + + +class DestinationHypervisorTooOld(Invalid): + msg_fmt = _("The instance requires a newer hypervisor version than " + "has been provided.") + + +class DestinationDiskExists(Invalid): + msg_fmt = _("The supplied disk path (%(path)s) already exists, " + "it is expected not to exist.") + + +class InvalidDevicePath(Invalid): + msg_fmt = _("The supplied device path (%(path)s) is invalid.") + + +class DevicePathInUse(Invalid): + msg_fmt = _("The supplied device path (%(path)s) is in use.") + code = 409 + + +class DeviceIsBusy(Invalid): + msg_fmt = _("The supplied device (%(device)s) is busy.") + + +class InvalidCPUInfo(Invalid): + msg_fmt = _("Unacceptable CPU info: %(reason)s") + + +class InvalidIpAddressError(Invalid): + msg_fmt = _("%(address)s is not a valid IP v4/6 address.") + + +class InvalidVLANTag(Invalid): + msg_fmt = _("VLAN tag is not appropriate for the port group " + "%(bridge)s. 
Expected VLAN tag is %(tag)s, " + "but the one associated with the port group is %(pgroup)s.") + + +class InvalidVLANPortGroup(Invalid): + msg_fmt = _("vSwitch which contains the port group %(bridge)s is " + "not associated with the desired physical adapter. " + "Expected vSwitch is %(expected)s, but the one associated " + "is %(actual)s.") + + +class InvalidDiskFormat(Invalid): + msg_fmt = _("Disk format %(disk_format)s is not acceptable") + + +class InvalidDiskInfo(Invalid): + msg_fmt = _("Disk info file is invalid: %(reason)s") + + +class DiskInfoReadWriteFail(Invalid): + msg_fmt = _("Failed to read or write disk info file: %(reason)s") + + +class ImageUnacceptable(Invalid): + msg_fmt = _("Image %(image_id)s is unacceptable: %(reason)s") + + +class InstanceUnacceptable(Invalid): + msg_fmt = _("Instance %(instance_id)s is unacceptable: %(reason)s") + + +class InvalidEc2Id(Invalid): + msg_fmt = _("Ec2 id %(ec2_id)s is unacceptable.") + + +class InvalidUUID(Invalid): + msg_fmt = _("Expected a uuid but received %(uuid)s.") + + +class InvalidID(Invalid): + msg_fmt = _("Invalid ID received %(id)s.") + + +class ConstraintNotMet(RackException): + msg_fmt = _("Constraint not met.") + code = 412 + + +class NotFound(RackException): + msg_fmt = _("Resource could not be found.") + code = 404 + + +class AgentBuildNotFound(NotFound): + msg_fmt = _("No agent-build associated with id %(id)s.") + + +class AgentBuildExists(RackException): + msg_fmt = _("Agent-build with hypervisor %(hypervisor)s os %(os)s " + "architecture %(architecture)s exists.") + + +class VolumeNotFound(NotFound): + ec2_code = 'InvalidVolumeID.NotFound' + msg_fmt = _("Volume %(volume_id)s could not be found.") + + +class VolumeBDMNotFound(NotFound): + msg_fmt = _("No volume Block Device Mapping with id %(volume_id)s.") + + +class SnapshotNotFound(NotFound): + ec2_code = 'InvalidSnapshotID.NotFound' + msg_fmt = _("Snapshot %(snapshot_id)s could not be found.") + + +class DiskNotFound(NotFound): + msg_fmt = _("No disk at %(location)s") + + +class VolumeDriverNotFound(NotFound): + msg_fmt = _("Could not find a handler for %(driver_type)s volume.") + + +class InvalidImageRef(Invalid): + msg_fmt = _("Invalid image href %(image_href)s.") + + +class AutoDiskConfigDisabledByImage(Invalid): + msg_fmt = _("Requested image %(image)s " + "has automatic disk resize disabled.") + + +class ImageNotFound(NotFound): + msg_fmt = _("Image %(image_id)s could not be found.") + + +class PreserveEphemeralNotSupported(Invalid): + msg_fmt = _("The current driver does not support " + "preserving ephemeral partitions.") + + +class ImageNotFoundEC2(ImageNotFound): + msg_fmt = _("Image %(image_id)s could not be found. The rack EC2 API " + "assigns image ids dynamically when they are listed for the " + "first time. 
Have you listed image ids since adding this " + "image?") + + +class ProjectNotFound(NotFound): + msg_fmt = _("Project %(project_id)s could not be found.") + + +class StorageRepositoryNotFound(NotFound): + msg_fmt = _("Cannot find SR to read/write VDI.") + + +class NetworkDuplicated(Invalid): + msg_fmt = _("Network %(network_id)s is duplicated.") + + +class NetworkInUse(RackException): + msg_fmt = _("Network %(network_id)s is still in use.") + + +class NetworkNotCreated(RackException): + msg_fmt = _("%(req)s is required to create a network.") + + +class NetworkNotFound(NotFound): + msg_fmt = _("Network %(network_id)s could not be found.") + + +class PortNotFound(NotFound): + msg_fmt = _("Port id %(port_id)s could not be found.") + + +class NetworkNotFoundForBridge(NetworkNotFound): + msg_fmt = _("Network could not be found for bridge %(bridge)s") + + +class NetworkNotFoundForUUID(NetworkNotFound): + msg_fmt = _("Network could not be found for uuid %(uuid)s") + + +class NetworkNotFoundForCidr(NetworkNotFound): + msg_fmt = _("Network could not be found with cidr %(cidr)s.") + + +class NetworkNotFoundForInstance(NetworkNotFound): + msg_fmt = _("Network could not be found for instance %(instance_id)s.") + + +class NoMoreNetworks(RackException): + msg_fmt = _("No more available networks.") + + +class NetworkNotFoundForProject(NotFound): + msg_fmt = _("Either network uuid %(network_uuid)s is not present or " + "is not assigned to the project %(project_id)s.") + + +class NetworkAmbiguous(Invalid): + msg_fmt = _("More than one possible network found. Specify " + "network ID(s) to select which one(s) to connect to,") + + +class NetworkRequiresSubnet(Invalid): + msg_fmt = _("Network %(network_uuid)s requires a subnet in order to boot" + " instances on.") + + +class ExternalNetworkAttachForbidden(NotAuthorized): + msg_fmt = _("It is not allowed to create an interface on " + "external network %(network_uuid)s") + + +class DatastoreNotFound(NotFound): + msg_fmt = _("Could not find the datastore reference(s) which the VM uses.") + + +class PortInUse(Invalid): + msg_fmt = _("Port %(port_id)s is still in use.") + + +class PortRequiresFixedIP(Invalid): + msg_fmt = _("Port %(port_id)s requires a FixedIP in order to be used.") + + +class PortNotUsable(Invalid): + msg_fmt = _("Port %(port_id)s not usable for instance %(instance)s.") + + +class PortNotFree(Invalid): + msg_fmt = _("No free port available for instance %(instance)s.") + + +class FixedIpExists(RackException): + msg_fmt = _("Fixed ip %(address)s already exists.") + + +class FixedIpNotFound(NotFound): + msg_fmt = _("No fixed IP associated with id %(id)s.") + + +class FixedIpNotFoundForAddress(FixedIpNotFound): + msg_fmt = _("Fixed ip not found for address %(address)s.") + + +class FixedIpNotFoundForInstance(FixedIpNotFound): + msg_fmt = _("Instance %(instance_uuid)s has zero fixed ips.") + + +class FixedIpNotFoundForNetworkHost(FixedIpNotFound): + msg_fmt = _("Network host %(host)s has zero fixed ips " + "in network %(network_id)s.") + + +class FixedIpNotFoundForSpecificInstance(FixedIpNotFound): + msg_fmt = _("Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'.") + + +class FixedIpNotFoundForNetwork(FixedIpNotFound): + msg_fmt = _("Fixed IP address (%(address)s) does not exist in " + "network (%(network_uuid)s).") + + +class FixedIpAlreadyInUse(RackException): + msg_fmt = _("Fixed IP address %(address)s is already in use on instance " + "%(instance_uuid)s.") + + +class FixedIpAssociatedWithMultipleInstances(RackException): + msg_fmt = 
_("More than one instance is associated with fixed ip address " + "'%(address)s'.") + + +class FixedIpInvalid(Invalid): + msg_fmt = _("Fixed IP address %(address)s is invalid.") + + +class NoMoreFixedIps(RackException): + ec2_code = 'UnsupportedOperation' + msg_fmt = _("Zero fixed ips available.") + + +class NoFixedIpsDefined(NotFound): + msg_fmt = _("Zero fixed ips could be found.") + + +class FloatingIpExists(RackException): + msg_fmt = _("Floating ip %(address)s already exists.") + + +class FloatingIpNotFound(NotFound): + ec2_code = "UnsupportedOperation" + msg_fmt = _("Floating ip not found for id %(id)s.") + + +class FloatingIpDNSExists(Invalid): + msg_fmt = _("The DNS entry %(name)s already exists in domain %(domain)s.") + + +class FloatingIpNotFoundForAddress(FloatingIpNotFound): + msg_fmt = _("Floating ip not found for address %(address)s.") + + +class FloatingIpNotFoundForHost(FloatingIpNotFound): + msg_fmt = _("Floating ip not found for host %(host)s.") + + +class FloatingIpMultipleFoundForAddress(RackException): + msg_fmt = _("Multiple floating ips are found for address %(address)s.") + + +class FloatingIpPoolNotFound(NotFound): + msg_fmt = _("Floating ip pool not found.") + safe = True + + +class NoMoreFloatingIps(FloatingIpNotFound): + msg_fmt = _("Zero floating ips available.") + safe = True + + +class FloatingIpAssociated(RackException): + ec2_code = "UnsupportedOperation" + msg_fmt = _("Floating ip %(address)s is associated.") + + +class FloatingIpNotAssociated(RackException): + msg_fmt = _("Floating ip %(address)s is not associated.") + + +class NoFloatingIpsDefined(NotFound): + msg_fmt = _("Zero floating ips exist.") + + +class NoFloatingIpInterface(NotFound): + ec2_code = "UnsupportedOperation" + msg_fmt = _("Interface %(interface)s not found.") + + +class CannotDisassociateAutoAssignedFloatingIP(RackException): + ec2_code = "UnsupportedOperation" + msg_fmt = _("Cannot disassociate auto assigned floating ip") + + +class ServiceNotFound(NotFound): + msg_fmt = _("Service %(service_id)s could not be found.") + + +class ServiceBinaryExists(RackException): + msg_fmt = _("Service with host %(host)s binary %(binary)s exists.") + + +class ServiceTopicExists(RackException): + msg_fmt = _("Service with host %(host)s topic %(topic)s exists.") + + +class HostNotFound(NotFound): + msg_fmt = _("Host %(host)s could not be found.") + + +class ComputeHostNotFound(HostNotFound): + msg_fmt = _("Compute host %(host)s could not be found.") + + +class HostBinaryNotFound(NotFound): + msg_fmt = _("Could not find binary %(binary)s on host %(host)s.") + + +class InvalidReservationExpiration(Invalid): + msg_fmt = _("Invalid reservation expiration %(expire)s.") + + +class InvalidQuotaValue(Invalid): + msg_fmt = _("Change would make usage less than 0 for the following " + "resources: %(unders)s") + + +class QuotaNotFound(NotFound): + msg_fmt = _("Quota could not be found") + + +class QuotaExists(RackException): + msg_fmt = _("Quota exists for project %(project_id)s, " + "resource %(resource)s") + + +class QuotaResourceUnknown(QuotaNotFound): + msg_fmt = _("Unknown quota resources %(unknown)s.") + + +class ProjectUserQuotaNotFound(QuotaNotFound): + msg_fmt = _("Quota for user %(user_id)s in project %(project_id)s " + "could not be found.") + + +class ProjectQuotaNotFound(QuotaNotFound): + msg_fmt = _("Quota for project %(project_id)s could not be found.") + + +class QuotaClassNotFound(QuotaNotFound): + msg_fmt = _("Quota class %(class_name)s could not be found.") + + +class 
QuotaUsageNotFound(QuotaNotFound): + msg_fmt = _("Quota usage for project %(project_id)s could not be found.") + + +class ReservationNotFound(QuotaNotFound): + msg_fmt = _("Quota reservation %(uuid)s could not be found.") + + +class OverQuota(RackException): + msg_fmt = _("Quota exceeded for resources: %(overs)s") + + +class SecurityGroupExists(Invalid): + ec2_code = 'InvalidGroup.Duplicate' + msg_fmt = _("Security group %(security_group_name)s already exists " + "for project %(project_id)s.") + + +class SecurityGroupExistsForInstance(Invalid): + msg_fmt = _("Security group %(security_group_id)s is already associated" + " with the instance %(instance_id)s") + + +class SecurityGroupNotExistsForInstance(Invalid): + msg_fmt = _("Security group %(security_group_id)s is not associated with" + " the instance %(instance_id)s") + + +class SecurityGroupDefaultRuleNotFound(Invalid): + msg_fmt = _("Security group default rule (%rule_id)s not found.") + + +class SecurityGroupCannotBeApplied(Invalid): + msg_fmt = _("Network requires port_security_enabled and subnet associated" + " in order to apply security groups.") + + +class SecurityGroupRuleExists(Invalid): + ec2_code = 'InvalidPermission.Duplicate' + msg_fmt = _("Rule already exists in group: %(rule)s") + + +class NoUniqueMatch(RackException): + msg_fmt = _("No Unique Match Found.") + code = 409 + + +class MigrationNotFound(NotFound): + msg_fmt = _("Migration %(migration_id)s could not be found.") + + +class MigrationNotFoundByStatus(MigrationNotFound): + msg_fmt = _("Migration not found for instance %(instance_id)s " + "with status %(status)s.") + + +class ConsolePoolNotFound(NotFound): + msg_fmt = _("Console pool %(pool_id)s could not be found.") + + +class ConsolePoolExists(RackException): + msg_fmt = _("Console pool with host %(host)s, console_type " + "%(console_type)s and compute_host %(compute_host)s " + "already exists.") + + +class ConsolePoolNotFoundForHostType(NotFound): + msg_fmt = _("Console pool of type %(console_type)s " + "for compute host %(compute_host)s " + "on proxy host %(host)s not found.") + + +class ConsoleNotFound(NotFound): + msg_fmt = _("Console %(console_id)s could not be found.") + + +class ConsoleNotFoundForInstance(ConsoleNotFound): + msg_fmt = _("Console for instance %(instance_uuid)s could not be found.") + + +class ConsoleNotFoundInPoolForInstance(ConsoleNotFound): + msg_fmt = _("Console for instance %(instance_uuid)s " + "in pool %(pool_id)s could not be found.") + + +class ConsoleTypeInvalid(Invalid): + msg_fmt = _("Invalid console type %(console_type)s") + + +class ConsoleTypeUnavailable(Invalid): + msg_fmt = _("Unavailable console type %(console_type)s.") + + +class ConsolePortRangeExhausted(RackException): + msg_fmt = _("The console port range %(min_port)d-%(max_port)d is " + "exhausted.") + + +class FlavorNotFound(NotFound): + msg_fmt = _("Flavor %(flavor_id)s could not be found.") + + +class FlavorNotFoundByName(FlavorNotFound): + msg_fmt = _("Flavor with name %(flavor_name)s could not be found.") + + +class FlavorAccessNotFound(NotFound): + msg_fmt = _("Flavor access not found for %(flavor_id)s / " + "%(project_id)s combination.") + + +class CellNotFound(NotFound): + msg_fmt = _("Cell %(cell_name)s doesn't exist.") + + +class CellExists(RackException): + msg_fmt = _("Cell with name %(name)s already exists.") + + +class CellRoutingInconsistency(RackException): + msg_fmt = _("Inconsistency in cell routing: %(reason)s") + + +class CellServiceAPIMethodNotFound(NotFound): + msg_fmt = _("Service API method not 
found: %(detail)s") + + +class CellTimeout(NotFound): + msg_fmt = _("Timeout waiting for response from cell") + + +class CellMaxHopCountReached(RackException): + msg_fmt = _("Cell message has reached maximum hop count: %(hop_count)s") + + +class NoCellsAvailable(RackException): + msg_fmt = _("No cells available matching scheduling criteria.") + + +class CellsUpdateUnsupported(RackException): + msg_fmt = _("Cannot update cells configuration file.") + + +class InstanceUnknownCell(NotFound): + msg_fmt = _("Cell is not known for instance %(instance_uuid)s") + + +class SchedulerHostFilterNotFound(NotFound): + msg_fmt = _("Scheduler Host Filter %(filter_name)s could not be found.") + + +class FlavorExtraSpecsNotFound(NotFound): + msg_fmt = _("Flavor %(flavor_id)s has no extra specs with " + "key %(extra_specs_key)s.") + + +class ComputeHostMetricNotFound(NotFound): + msg_fmt = _("Metric %(name)s could not be found on the compute " + "host node %(host)s.%(node)s.") + + +class FileNotFound(NotFound): + msg_fmt = _("File %(file_path)s could not be found.") + + +class NoFilesFound(NotFound): + msg_fmt = _("Zero files could be found.") + + +class SwitchNotFoundForNetworkAdapter(NotFound): + msg_fmt = _("Virtual switch associated with the " + "network adapter %(adapter)s not found.") + + +class NetworkAdapterNotFound(NotFound): + msg_fmt = _("Network adapter %(adapter)s could not be found.") + + +class ClassNotFound(NotFound): + msg_fmt = _("Class %(class_name)s could not be found: %(exception)s") + + +class NotAllowed(RackException): + msg_fmt = _("Action not allowed.") + + +class ImageRotationNotAllowed(RackException): + msg_fmt = _("Rotation is not allowed for snapshots") + + +class RotationRequiredForBackup(RackException): + msg_fmt = _("Rotation param is required for backup image_type") + + +class KeyPairExists(RackException): + ec2_code = 'InvalidKeyPair.Duplicate' + msg_fmt = _("Key pair '%(key_name)s' already exists.") + + +class InstanceExists(RackException): + msg_fmt = _("Instance %(name)s already exists.") + + +class FlavorExists(RackException): + msg_fmt = _("Flavor with name %(name)s already exists.") + + +class FlavorIdExists(RackException): + msg_fmt = _("Flavor with ID %(flavor_id)s already exists.") + + +class FlavorAccessExists(RackException): + msg_fmt = _("Flavor access already exists for flavor %(flavor_id)s " + "and project %(project_id)s combination.") + + +class InvalidSharedStorage(RackException): + msg_fmt = _("%(path)s is not on shared storage: %(reason)s") + + +class InvalidLocalStorage(RackException): + msg_fmt = _("%(path)s is not on local storage: %(reason)s") + + +class MigrationError(RackException): + msg_fmt = _("Migration error: %(reason)s") + + +class MigrationPreCheckError(MigrationError): + msg_fmt = _("Migration pre-check error: %(reason)s") + + +class MalformedRequestBody(RackException): + msg_fmt = _("Malformed message body: %(reason)s") + + +class ConfigNotFound(RackException): + msg_fmt = _("Could not find config at %(path)s") + + +class PasteAppNotFound(RackException): + msg_fmt = _("Could not load paste app '%(name)s' from %(path)s") + + +class CannotResizeToSameFlavor(RackException): + msg_fmt = _("When resizing, instances must change flavor!") + + +class ResizeError(RackException): + msg_fmt = _("Resize error: %(reason)s") + + +class CannotResizeDisk(RackException): + msg_fmt = _("Server disk was unable to be resized because: %(reason)s") + + +class FlavorMemoryTooSmall(RackException): + msg_fmt = _("Flavor's memory is too small for requested image.") + 
+ +class FlavorDiskTooSmall(RackException): + msg_fmt = _("Flavor's disk is too small for requested image.") + + +class InsufficientFreeMemory(RackException): + msg_fmt = _("Insufficient free memory on compute node to start %(uuid)s.") + + +class NoValidHost(RackException): + msg_fmt = _("No valid host was found. %(reason)s") + + +class QuotaError(RackException): + ec2_code = 'ResourceLimitExceeded' + msg_fmt = _("Quota exceeded: code=%(code)s") + code = 413 + headers = {'Retry-After': 0} + safe = True + + +class TooManyInstances(QuotaError): + msg_fmt = _("Quota exceeded for %(overs)s: Requested %(req)s," + " but already used %(used)d of %(allowed)d %(resource)s") + + +class FloatingIpLimitExceeded(QuotaError): + msg_fmt = _("Maximum number of floating ips exceeded") + + +class FixedIpLimitExceeded(QuotaError): + msg_fmt = _("Maximum number of fixed ips exceeded") + + +class MetadataLimitExceeded(QuotaError): + msg_fmt = _("Maximum number of metadata items exceeds %(allowed)d") + + +class OnsetFileLimitExceeded(QuotaError): + msg_fmt = _("Personality file limit exceeded") + + +class OnsetFilePathLimitExceeded(QuotaError): + msg_fmt = _("Personality file path too long") + + +class OnsetFileContentLimitExceeded(QuotaError): + msg_fmt = _("Personality file content too long") + + +class KeypairLimitExceeded(QuotaError): + msg_fmt = _("Maximum number of key pairs exceeded") + + +class SecurityGroupLimitExceeded(QuotaError): + ec2_code = 'SecurityGroupLimitExceeded' + msg_fmt = _("Maximum number of security groups or rules exceeded") + + +class PortLimitExceeded(QuotaError): + msg_fmt = _("Maximum number of ports exceeded") + + +class AggregateError(RackException): + msg_fmt = _("Aggregate %(aggregate_id)s: action '%(action)s' " + "caused an error: %(reason)s.") + + +class AggregateNotFound(NotFound): + msg_fmt = _("Aggregate %(aggregate_id)s could not be found.") + + +class AggregateNameExists(RackException): + msg_fmt = _("Aggregate %(aggregate_name)s already exists.") + + +class AggregateHostNotFound(NotFound): + msg_fmt = _("Aggregate %(aggregate_id)s has no host %(host)s.") + + +class AggregateMetadataNotFound(NotFound): + msg_fmt = _("Aggregate %(aggregate_id)s has no metadata with " + "key %(metadata_key)s.") + + +class AggregateHostExists(RackException): + msg_fmt = _("Aggregate %(aggregate_id)s already has host %(host)s.") + + +class FlavorCreateFailed(RackException): + msg_fmt = _("Unable to create flavor") + + +class InstancePasswordSetFailed(RackException): + msg_fmt = _("Failed to set admin password on %(instance)s " + "because %(reason)s") + safe = True + + +class DuplicateVlan(RackException): + msg_fmt = _("Detected existing vlan with id %(vlan)d") + + +class CidrConflict(RackException): + msg_fmt = _("There was a conflict when trying to complete your request.") + code = 409 + + +class InstanceNotFound(NotFound): + ec2_code = 'InvalidInstanceID.NotFound' + msg_fmt = _("Instance %(instance_id)s could not be found.") + + +class InstanceInfoCacheNotFound(NotFound): + msg_fmt = _("Info cache for instance %(instance_uuid)s could not be " + "found.") + + +class NodeNotFound(NotFound): + msg_fmt = _("Node %(node_id)s could not be found.") + + +class NodeNotFoundByUUID(NotFound): + msg_fmt = _("Node with UUID %(node_uuid)s could not be found.") + + +class MarkerNotFound(NotFound): + msg_fmt = _("Marker %(marker)s could not be found.") + + +class InvalidInstanceIDMalformed(Invalid): + ec2_code = 'InvalidInstanceID.Malformed' + msg_fmt = _("Invalid id: %(val)s (expecting \"i-...\").") + + 
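The wrap_exception decorator defined near the top of this module is intended for manager-style methods that take (self, context, ...). A rough sketch of how a service might apply it, assuming the rpc.get_notifier(service_name, host) call pattern used by Manager.__init__ and using hypothetical SchedulerTasks / select_host names:

from rack import exception
from rack import rpc


# Assumption: get_notifier(service_name, host), as called in rack/manager.py.
notifier = rpc.get_notifier('scheduler', 'host1')


class SchedulerTasks(object):

    @exception.wrap_exception(notifier=notifier)
    def select_host(self, context, request_spec, admin_password=None):
        # Any exception raised here is re-raised unchanged, but the
        # decorator first emits notifier.error(context, 'select_host',
        # payload); the payload carries the call arguments with any key
        # containing "_pass" (such as admin_password) stripped by
        # _cleanse_dict().
        raise exception.NoValidHost(reason='no hosts available')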
+class CouldNotFetchImage(RackException): + msg_fmt = _("Could not fetch image %(image_id)s") + + +class CouldNotUploadImage(RackException): + msg_fmt = _("Could not upload image %(image_id)s") + + +class TaskAlreadyRunning(RackException): + msg_fmt = _("Task %(task_name)s is already running on host %(host)s") + + +class TaskNotRunning(RackException): + msg_fmt = _("Task %(task_name)s is not running on host %(host)s") + + +class InstanceIsLocked(InstanceInvalidState): + msg_fmt = _("Instance %(instance_uuid)s is locked") + + +class ConfigDriveInvalidValue(Invalid): + msg_fmt = _("Invalid value for Config Drive option: %(option)s") + + +class ConfigDriveMountFailed(RackException): + msg_fmt = _("Could not mount vfat config drive. %(operation)s failed. " + "Error: %(error)s") + + +class ConfigDriveUnknownFormat(RackException): + msg_fmt = _("Unknown config drive format %(format)s. Select one of " + "iso9660 or vfat.") + + +class InterfaceAttachFailed(Invalid): + msg_fmt = _("Failed to attach network adapter device to %(instance)s") + + +class InterfaceDetachFailed(Invalid): + msg_fmt = _("Failed to detach network adapter device from %(instance)s") + + +class InstanceUserDataTooLarge(RackException): + msg_fmt = _("User data too large. User data must be no larger than " + "%(maxsize)s bytes once base64 encoded. Your data is " + "%(length)d bytes") + + +class InstanceUserDataMalformed(RackException): + msg_fmt = _("User data needs to be valid base 64.") + + +class UnexpectedTaskStateError(RackException): + msg_fmt = _("Unexpected task state: expecting %(expected)s but " + "the actual state is %(actual)s") + + +class UnexpectedDeletingTaskStateError(UnexpectedTaskStateError): + pass + + +class InstanceActionNotFound(RackException): + msg_fmt = _("Action for request_id %(request_id)s on instance" + " %(instance_uuid)s not found") + + +class InstanceActionEventNotFound(RackException): + msg_fmt = _("Event %(event)s not found for action id %(action_id)s") + + +class UnexpectedVMStateError(RackException): + msg_fmt = _("Unexpected VM state: expecting %(expected)s but " + "the actual state is %(actual)s") + + +class CryptoCAFileNotFound(FileNotFound): + msg_fmt = _("The CA file for %(project)s could not be found") + + +class CryptoCRLFileNotFound(FileNotFound): + msg_fmt = _("The CRL file for %(project)s could not be found") + + +class InstanceRecreateNotSupported(Invalid): + msg_fmt = _('Instance recreate is not supported.') + + +class ServiceGroupUnavailable(RackException): + msg_fmt = _("The service from servicegroup driver %(driver)s is " + "temporarily unavailable.") + + +class DBNotAllowed(RackException): + msg_fmt = _('%(binary)s attempted direct database access which is ' + 'not allowed by policy') + + +class UnsupportedVirtType(Invalid): + msg_fmt = _("Virtualization type '%(virt)s' is not supported by " + "this compute driver") + + +class UnsupportedHardware(Invalid): + msg_fmt = _("Requested hardware '%(model)s' is not supported by " + "the '%(virt)s' virt driver") + + +class Base64Exception(RackException): + msg_fmt = _("Invalid Base 64 data for file %(path)s") + + +class BuildAbortException(RackException): + msg_fmt = _("Build of instance %(instance_uuid)s aborted: %(reason)s") + + +class RescheduledException(RackException): + msg_fmt = _("Build of instance %(instance_uuid)s was re-scheduled: " + "%(reason)s") + + +class ShadowTableExists(RackException): + msg_fmt = _("Shadow table with name %(name)s already exists.") + + +class InstanceFaultRollback(RackException): + def __init__(self, 
inner_exception=None): + message = _("Instance rollback performed due to: %s") + self.inner_exception = inner_exception + super(InstanceFaultRollback, self).__init__(message % inner_exception) + + +class UnsupportedObjectError(RackException): + msg_fmt = _('Unsupported object type %(objtype)s') + + +class OrphanedObjectError(RackException): + msg_fmt = _('Cannot call %(method)s on orphaned %(objtype)s object') + + +class IncompatibleObjectVersion(RackException): + msg_fmt = _('Version %(objver)s of %(objname)s is not supported') + + +class ObjectActionError(RackException): + msg_fmt = _('Object action %(action)s failed because: %(reason)s') + + +class CoreAPIMissing(RackException): + msg_fmt = _("Core API extensions are missing: %(missing_apis)s") + + +class AgentError(RackException): + msg_fmt = _('Error during following call to agent: %(method)s') + + +class AgentTimeout(AgentError): + msg_fmt = _('Unable to contact guest agent. ' + 'The following call timed out: %(method)s') + + +class AgentNotImplemented(AgentError): + msg_fmt = _('Agent does not support the call: %(method)s') + + +class InstanceGroupNotFound(NotFound): + msg_fmt = _("Instance group %(group_uuid)s could not be found.") + + +class InstanceGroupIdExists(RackException): + msg_fmt = _("Instance group %(group_uuid)s already exists.") + + +class InstanceGroupMetadataNotFound(NotFound): + msg_fmt = _("Instance group %(group_uuid)s has no metadata with " + "key %(metadata_key)s.") + + +class InstanceGroupMemberNotFound(NotFound): + msg_fmt = _("Instance group %(group_uuid)s has no member with " + "id %(instance_id)s.") + + +class InstanceGroupPolicyNotFound(NotFound): + msg_fmt = _("Instance group %(group_uuid)s has no policy %(policy)s.") + + +class PluginRetriesExceeded(RackException): + msg_fmt = _("Number of retries to plugin (%(num_retries)d) exceeded.") + + +class ImageDownloadModuleError(RackException): + msg_fmt = _("There was an error with the download module %(module)s. " + "%(reason)s") + + +class ImageDownloadModuleMetaDataError(ImageDownloadModuleError): + msg_fmt = _("The metadata for this location will not work with this " + "module %(module)s. 
%(reason)s.") + + +class ImageDownloadModuleNotImplementedError(ImageDownloadModuleError): + msg_fmt = _("The method %(method_name)s is not implemented.") + + +class ImageDownloadModuleConfigurationError(ImageDownloadModuleError): + msg_fmt = _("The module %(module)s is misconfigured: %(reason)s.") + + +class ResourceMonitorError(RackException): + msg_fmt = _("Error when creating resource monitor: %(monitor)s") + + +class PciDeviceWrongAddressFormat(RackException): + msg_fmt = _("The PCI address %(address)s has an incorrect format.") + + +class PciDeviceNotFoundById(NotFound): + msg_fmt = _("PCI device %(id)s not found") + + +class PciDeviceNotFound(RackException): + msg_fmt = _("PCI Device %(node_id)s:%(address)s not found.") + + +class PciDeviceInvalidStatus(RackException): + msg_fmt = _( + "PCI device %(compute_node_id)s:%(address)s is %(status)s " + "instead of %(hopestatus)s") + + +class PciDeviceInvalidOwner(RackException): + msg_fmt = _( + "PCI device %(compute_node_id)s:%(address)s is owned by %(owner)s " + "instead of %(hopeowner)s") + + +class PciDeviceRequestFailed(RackException): + msg_fmt = _( + "PCI device request (%requests)s failed") + + +class PciDevicePoolEmpty(RackException): + msg_fmt = _( + "Attempt to consume PCI device %(compute_node_id)s:%(address)s " + "from empty pool") + + +class PciInvalidAlias(RackException): + msg_fmt = _("Invalid PCI alias definition: %(reason)s") + + +class PciRequestAliasNotDefined(RackException): + msg_fmt = _("PCI alias %(alias)s is not defined") + + +class MissingParameter(RackException): + ec2_code = 'MissingParameter' + msg_fmt = _("Not enough parameters: %(reason)s") + code = 400 + + +class PciConfigInvalidWhitelist(Invalid): + msg_fmt = _("Invalid PCI devices Whitelist config %(reason)s") + + +class PciTrackerInvalidNodeId(RackException): + msg_fmt = _("Cannot change %(node_id)s to %(new_node_id)s") + + +class InternalError(RackException): + ec2_code = 'InternalError' + msg_fmt = "%(err)s" + + +class PciDevicePrepareFailed(RackException): + msg_fmt = _("Failed to prepare PCI device %(id)s for instance " + "%(instance_uuid)s: %(reason)s") + + +class PciDeviceDetachFailed(RackException): + msg_fmt = _("Failed to detach PCI device %(dev)s: %(reason)s") + + +class PciDeviceUnsupportedHypervisor(RackException): + msg_fmt = _("%(type)s hypervisor does not support PCI devices") + + +class KeyManagerError(RackException): + msg_fmt = _("Key manager error: %(reason)s") + + +class InvalidVideoMode(Invalid): + msg_fmt = _("Provided video model (%(model)s) is not supported.") + + +class RngDeviceNotExist(Invalid): + msg_fmt = _("The provided RNG device path: (%(path)s) is not " + "present on the host.") + + +class RequestedVRamTooHigh(RackException): + msg_fmt = _("The requested amount of video memory %(req_vram)d is higher " + "than the maximum allowed by flavor %(max_vram)d.") + + +class InvalidWatchdogAction(Invalid): + msg_fmt = _("Provided watchdog action (%(action)s) is not supported.") + + +class NoBlockMigrationForConfigDriveInLibVirt(RackException): + msg_fmt = _("Block migration of instances with config drives is not " + "supported in libvirt.") + +class ServiceCatalogException(RackException): + msg_fmt = _("Invalid service catalog service: %(service_type)s'") + +class GroupCreateFailed(RackException): + msg_fmt = _("Unable to create group") + +class GroupIndexFailed(RackException): + msg_fmt = _("Unable to index group") + +class GroupNotFound(NotFound): + msg_fmt = _("Group %(gid)s could not be found.") + +class 
GroupInUse(RackException): + msg_fmt = _("Group %(gid)s is still in use.") + +class GroupDeleteFailed(RackException): + msg_fmt = _('Unable to delete Group') + +class NetworkCreateFailed(RackException): + msg_fmt = _("Unable to create network") + +class NetworkIndexFailed(RackException): + msg_fmt = _("Unable to index network") + +class NetworkShowFailed(RackException): + msg_fmt = _("Unable to show network") + +class NetworkDeleteFailed(RackException): + msg_fmt = _("Unable to delete network") + +class KeypairNotFound(NotFound): + msg_fmt = _("Keypair %(keypair_id)s could not be found.") + +class KeypairCreateFailed(RackException): + msg_fmt = _("Unable to create keypair") + +class KeypairDeleteFailed(RackException): + msg_fmt = _("Unable to delete keypair") + +class keypairInUse(RackException): + msg_fmt = _("Keypair %(keypair_id)s is still in use.") + +class InvalidOpenStackCredential(Invalid): + msg_fmt = _("OpenStack credential %(credential)s is required.") + + +class SecuritygroupNotFound(NotFound): + msg_fmt = _("Security group %(securitygroup_id)s not found.") + + +class SecuritygroupCreateFailed(RackException): + msg_fmt = _("Unable to create Securitygroup") + + +class SecuritygroupDeleteFailed(RackException): + msg_fmt = _("Unable to delete Securitygroup") + + +class SecuritygroupruleNotFound(NotFound): + msg_fmt = _("Securitygrouprule %(rule_id)s could not be found.") + + +class SecuritygroupruleCreateFailed(RackException): + msg_fmt = _("Unable to create Securitygrouprule") + + +class SecuritygroupruleDeleteFailed(RackException): + msg_fmt = _("Unable to delete Securitygrouprule") + + +class SecuritygroupInUse(RackException): + msg_fmt = _("Securitygroup %(securitygroup_id)s is still in use.") + + +class ProcessCreateFailed(RackException): + msg_fmt = _("Unable to create Process") + + +class ProcessDeleteFailed(RackException): + msg_fmt = _("Unable to delete Process") + + +class ProcessNotFound(NotFound): + msg_fmt = _("Process %(pid)s could not be found.") + +class NoNetworksFound(NotFound): + msg_fmt = _("No networks defined for gid %(gid)s.") diff --git a/rack/manager.py b/rack/manager.py new file mode 100644 index 0000000..1990b5a --- /dev/null +++ b/rack/manager.py @@ -0,0 +1,114 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Base Manager class. + +Managers are responsible for a certain aspect of the system. It is a logical +grouping of code relating to a portion of the system. In general other +components should be using the manager to make changes to the components that +it is responsible for. + +For example, other components that need to deal with volumes in some way, +should do so by calling methods on the VolumeManager instead of directly +changing fields in the database. This allows us to keep all of the code +relating to volumes in the same place. 
+ +We have adopted a basic strategy of Smart managers and dumb data, which means +rather than attaching methods to data objects, components should call manager +methods that act on the data. + +Methods on managers that can be executed locally should be called directly. If +a particular method must execute on a remote host, this should be done via rpc +to the service that wraps the manager + +Managers should be responsible for most of the db access, and +non-implementation specific data. Anything implementation specific that can't +be generalized should be done by the Driver. + +In general, we prefer to have one manager with multiple drivers for different +implementations, but sometimes it makes sense to have multiple managers. You +can think of it this way: Abstract different overall strategies at the manager +level(FlatNetwork vs VlanNetwork), and different implementations at the driver +level(LinuxNetDriver vs CiscoNetDriver). + +Managers will often provide methods for initial setup of a host or periodic +tasks to a wrapping service. + +This module provides Manager, a base class for managers. + +""" + +from oslo.config import cfg + +from rack.db import base +from rack.openstack.common import log as logging +from rack.openstack.common import periodic_task +from rack import rpc + + +CONF = cfg.CONF +CONF.import_opt('host', 'rack.netconf') +LOG = logging.getLogger(__name__) + + +class Manager(base.Base, periodic_task.PeriodicTasks): + + def __init__(self, host=None, db_driver=None, service_name='undefined'): + if not host: + host = CONF.host + self.host = host + self.backdoor_port = None + self.service_name = service_name + self.notifier = rpc.get_notifier(self.service_name, self.host) + self.additional_endpoints = [] + super(Manager, self).__init__(db_driver) + + def periodic_tasks(self, context, raise_on_error=False): + """Tasks to be run at a periodic interval.""" + return self.run_periodic_tasks(context, raise_on_error=raise_on_error) + + def init_host(self): + """Hook to do additional manager initialization when one requests + the service be started. This is called before any service record + is created. + + Child classes should override this method. + """ + pass + + def cleanup_host(self): + """Hook to do cleanup work when the service shuts down. + + Child classes should override this method. + """ + pass + + def pre_start_hook(self): + """Hook to provide the manager the ability to do additional + start-up work before any RPC queues/consumers are created. This is + called after other initialization has succeeded and a service + record is created. + + Child classes should override this method. + """ + pass + + def post_start_hook(self): + """Hook to provide the manager the ability to do additional + start-up work immediately after a service creates RPC consumers + and starts 'running'. + + Child classes should override this method. + """ + pass diff --git a/rack/netconf.py b/rack/netconf.py new file mode 100644 index 0000000..a8139d9 --- /dev/null +++ b/rack/netconf.py @@ -0,0 +1,58 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import socket + +from oslo.config import cfg + +from rack import utils + +CONF = cfg.CONF + + +def _get_my_ip(): + """Returns the actual ip of the local machine. + + This code figures out what source address would be used if some traffic + were to be sent out to some well known address on the Internet. In this + case, a Google DNS server is used, but the specific address does not + matter much. No traffic is actually sent. + """ + try: + csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + csock.connect(('8.8.8.8', 80)) + (addr, port) = csock.getsockname() + csock.close() + return addr + except socket.error: + return utils.get_my_ipv4_address() + + +netconf_opts = [ + cfg.StrOpt('my_ip', + default=_get_my_ip(), + help='IP address of this host'), + cfg.StrOpt('host', + default=socket.gethostname(), + help='Name of this node. This can be an opaque identifier. ' + 'It is not necessarily a hostname, FQDN, or IP address. ' + 'However, the node name must be valid within ' + 'an AMQP key, and if using ZeroMQ, a valid ' + 'hostname, FQDN, or IP address'), + cfg.BoolOpt('use_ipv6', + default=False, + help='Use IPv6'), +] + +CONF.register_opts(netconf_opts) diff --git a/rack/object.py b/rack/object.py new file mode 100644 index 0000000..bcce7cd --- /dev/null +++ b/rack/object.py @@ -0,0 +1,569 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import collections +import copy +import functools + +import netaddr +from oslo import messaging +import six + +from rack import context +from rack import exception +from rack.openstack.common.gettextutils import _ +from rack.openstack.common import log as logging +from rack.openstack.common import versionutils + + +LOG = logging.getLogger('object') + + +class NotSpecifiedSentinel: + pass + + +def get_attrname(name): + """Return the mangled name of the attribute's underlying storage.""" + return '_%s' % name + + +def make_class_properties(cls): + # NOTE(danms/comstud): Inherit fields from super classes. + # mro() returns the current class first and returns 'object' last, so + # those can be skipped. Also be careful to not overwrite any fields + # that already exist. And make sure each cls has its own copy of + # fields and that it is not sharing the dict with a super class. 
+ cls.fields = dict(cls.fields) + for supercls in cls.mro()[1:-1]: + if not hasattr(supercls, 'fields'): + continue + for name, field in supercls.fields.items(): + if name not in cls.fields: + cls.fields[name] = field + for name, field in cls.fields.iteritems(): + + def getter(self, name=name): + attrname = get_attrname(name) + if not hasattr(self, attrname): + self.obj_load_attr(name) + return getattr(self, attrname) + + def setter(self, value, name=name, field=field): + self._changed_fields.add(name) + try: + return setattr(self, get_attrname(name), + field.coerce(self, name, value)) + except Exception: + attr = "%s.%s" % (self.obj_name(), name) + LOG.exception(_('Error setting %(attr)s') % + {'attr': attr}) + raise + + setattr(cls, name, property(getter, setter)) + + +class RackObjectMetaclass(type): + """Metaclass that allows tracking of object classes.""" + + # NOTE(danms): This is what controls whether object operations are + # remoted. If this is not None, use it to remote things over RPC. + indirection_api = None + + def __init__(cls, names, bases, dict_): + if not hasattr(cls, '_obj_classes'): + # This will be set in the 'RackObject' class. + cls._obj_classes = collections.defaultdict(list) + else: + # Add the subclass to RackObject._obj_classes + make_class_properties(cls) + cls._obj_classes[cls.obj_name()].append(cls) + + +def remotable_classmethod(fn): + """Decorator for remotable classmethods.""" + @functools.wraps(fn) + def wrapper(cls, context, *args, **kwargs): + if RackObject.indirection_api: + result = RackObject.indirection_api.object_class_action( + context, cls.obj_name(), fn.__name__, cls.VERSION, + args, kwargs) + else: + result = fn(cls, context, *args, **kwargs) + if isinstance(result, RackObject): + result._context = context + return result + return classmethod(wrapper) + + +def remotable(fn): + """Decorator for remotable object methods.""" + @functools.wraps(fn) + def wrapper(self, *args, **kwargs): + ctxt = self._context + try: + if isinstance(args[0], (context.RequestContext)): + ctxt = args[0] + args = args[1:] + except IndexError: + pass + if ctxt is None: + raise exception.OrphanedObjectError(method=fn.__name__, + objtype=self.obj_name()) + # Force this to be set if it wasn't before. + self._context = ctxt + if RackObject.indirection_api: + updates, result = RackObject.indirection_api.object_action( + ctxt, self, fn.__name__, args, kwargs) + for key, value in updates.iteritems(): + if key in self.fields: + field = self.fields[key] + self[key] = field.from_primitive(self, key, value) + self.obj_reset_changes() + self._changed_fields = set(updates.get('obj_what_changed', [])) + return result + else: + return fn(self, ctxt, *args, **kwargs) + return wrapper + + +@six.add_metaclass(RackObjectMetaclass) +class RackObject(object): + """Base class and object factory. + + This forms the base of all objects that can be remoted or instantiated + via RPC. Simply defining a class that inherits from this base class + will make it remotely instantiatable. Objects should implement the + necessary "get" classmethod routines as well as "save" object methods + as appropriate. + """ + + # Object versioning rules + # + # Each service has its set of objects, each with a version attached. When + # a client attempts to call an object method, the server checks to see if + # the version of that object matches (in a compatible way) its object + # implementation. If so, cool, and if not, fail. + VERSION = '1.0' + + # The fields present in this object as key:field pairs. 
For example: + # + # fields = { 'foo': fields.IntegerField(), + # 'bar': fields.StringField(), + # } + fields = {} + obj_extra_fields = [] + + def __init__(self, context=None, **kwargs): + self._changed_fields = set() + self._context = context + for key in kwargs.keys(): + self[key] = kwargs[key] + + @classmethod + def obj_name(cls): + """Return a canonical name for this object which will be used over + the wire for remote hydration. + """ + return cls.__name__ + + @classmethod + def obj_class_from_name(cls, objname, objver): + """Returns a class from the registry based on a name and version.""" + if objname not in cls._obj_classes: + LOG.error(_('Unable to instantiate unregistered object type ' + '%(objtype)s') % dict(objtype=objname)) + raise exception.UnsupportedObjectError(objtype=objname) + + latest = None + compatible_match = None + for objclass in cls._obj_classes[objname]: + if objclass.VERSION == objver: + return objclass + + version_bits = tuple([int(x) for x in objclass.VERSION.split(".")]) + if latest is None: + latest = version_bits + elif latest < version_bits: + latest = version_bits + + if versionutils.is_compatible(objver, objclass.VERSION): + compatible_match = objclass + + if compatible_match: + return compatible_match + + latest_ver = '%i.%i' % latest + raise exception.IncompatibleObjectVersion(objname=objname, + objver=objver, + supported=latest_ver) + + @classmethod + def _obj_from_primitive(cls, context, objver, primitive): + self = cls() + self._context = context + self.VERSION = objver + objdata = primitive['rack_object.data'] + changes = primitive.get('rack_object.changes', []) + for name, field in self.fields.items(): + if name in objdata: + setattr(self, name, field.from_primitive(self, name, + objdata[name])) + self._changed_fields = set([x for x in changes if x in self.fields]) + return self + + @classmethod + def obj_from_primitive(cls, primitive, context=None): + """Object field-by-field hydration.""" + if primitive['rack_object.namespace'] != 'rack': + # NOTE(danms): We don't do anything with this now, but it's + # there for "the future" + raise exception.UnsupportedObjectError( + objtype='%s.%s' % (primitive['rack_object.namespace'], + primitive['rack_object.name'])) + objname = primitive['rack_object.name'] + objver = primitive['rack_object.version'] + objclass = cls.obj_class_from_name(objname, objver) + return objclass._obj_from_primitive(context, objver, primitive) + + def __deepcopy__(self, memo): + """Efficiently make a deep copy of this object.""" + + # NOTE(danms): A naive deepcopy would copy more than we need, + # and since we have knowledge of the volatile bits of the + # object, we can be smarter here. Also, nested entities within + # some objects may be uncopyable, so we can avoid those sorts + # of issues by copying only our field data. + + nobj = self.__class__() + nobj._context = self._context + for name in self.fields: + if self.obj_attr_is_set(name): + nval = copy.deepcopy(getattr(self, name), memo) + setattr(nobj, name, nval) + nobj._changed_fields = set(self._changed_fields) + return nobj + + def obj_clone(self): + """Create a copy.""" + return copy.deepcopy(self) + + def obj_make_compatible(self, primitive, target_version): + """Make an object representation compatible with a target version. + + This is responsible for taking the primitive representation of + an object and making it suitable for the given target_version. 
+ This may mean converting the format of object attributes, removing + attributes that have been added since the target version, etc. + + :param:primitive: The result of self.obj_to_primitive() + :param:target_version: The version string requested by the recipient + of the object. + :param:raises: rack.exception.UnsupportedObjectError if conversion + is not possible for some reason. + """ + pass + + def obj_to_primitive(self, target_version=None): + """Simple base-case dehydration. + + This calls to_primitive() for each item in fields. + """ + primitive = dict() + for name, field in self.fields.items(): + if self.obj_attr_is_set(name): + primitive[name] = field.to_primitive(self, name, + getattr(self, name)) + if target_version: + self.obj_make_compatible(primitive, target_version) + obj = {'rack_object.name': self.obj_name(), + 'rack_object.namespace': 'rack', + 'rack_object.version': target_version or self.VERSION, + 'rack_object.data': primitive} + if self.obj_what_changed(): + obj['rack_object.changes'] = list(self.obj_what_changed()) + return obj + + def obj_load_attr(self, attrname): + """Load an additional attribute from the real object. + + This should use self._conductor, and cache any data that might + be useful for future load operations. + """ + raise NotImplementedError( + _("Cannot load '%s' in the base class") % attrname) + + def save(self, context): + """Save the changed fields back to the store. + + This is optional for subclasses, but is presented here in the base + class for consistency among those that do. + """ + raise NotImplementedError('Cannot save anything in the base class') + + def obj_what_changed(self): + """Returns a set of fields that have been modified.""" + changes = set(self._changed_fields) + for field in self.fields: + if (self.obj_attr_is_set(field) and + isinstance(self[field], RackObject) and + self[field].obj_what_changed()): + changes.add(field) + return changes + + def obj_get_changes(self): + """Returns a dict of changed fields and their new values.""" + changes = {} + for key in self.obj_what_changed(): + changes[key] = self[key] + return changes + + def obj_reset_changes(self, fields=None): + """Reset the list of fields that have been changed. + + Note that this is NOT "revert to previous values" + """ + if fields: + self._changed_fields -= set(fields) + else: + self._changed_fields.clear() + + def obj_attr_is_set(self, attrname): + """Test object to see if attrname is present. + + Returns True if the named attribute has a value set, or + False if not. Raises AttributeError if attrname is not + a valid attribute for this object. + """ + if attrname not in self.obj_fields: + raise AttributeError( + _("%(objname)s object has no attribute '%(attrname)s'") % + {'objname': self.obj_name(), 'attrname': attrname}) + return hasattr(self, get_attrname(attrname)) + + @property + def obj_fields(self): + return self.fields.keys() + self.obj_extra_fields + + # dictish syntactic sugar + def iteritems(self): + """For backwards-compatibility with dict-based objects. + + NOTE(danms): May be removed in the future. + """ + for name in self.obj_fields: + if (self.obj_attr_is_set(name) or + name in self.obj_extra_fields): + yield name, getattr(self, name) + + items = lambda self: list(self.iteritems()) + + def __getitem__(self, name): + """For backwards-compatibility with dict-based objects. + + NOTE(danms): May be removed in the future. 
+ """ + return getattr(self, name) + + def __setitem__(self, name, value): + """For backwards-compatibility with dict-based objects. + + NOTE(danms): May be removed in the future. + """ + setattr(self, name, value) + + def __contains__(self, name): + """For backwards-compatibility with dict-based objects. + + NOTE(danms): May be removed in the future. + """ + try: + return self.obj_attr_is_set(name) + except AttributeError: + return False + + def get(self, key, value=NotSpecifiedSentinel): + """For backwards-compatibility with dict-based objects. + + NOTE(danms): May be removed in the future. + """ + if key not in self.obj_fields: + raise AttributeError("'%s' object has no attribute '%s'" % ( + self.__class__, key)) + if value != NotSpecifiedSentinel and not self.obj_attr_is_set(key): + return value + else: + return self[key] + + def update(self, updates): + """For backwards-compatibility with dict-base objects. + + NOTE(danms): May be removed in the future. + """ + for key, value in updates.items(): + self[key] = value + + # This is a dictionary of my_version:child_version mappings so that + # we can support backleveling our contents based on the version + # requested of the list object. + child_versions = {} + + def __iter__(self): + """List iterator interface.""" + return iter(self.objects) + + def __len__(self): + """List length.""" + return len(self.objects) + + def __getitem__(self, index): + """List index access.""" + if isinstance(index, slice): + new_obj = self.__class__() + new_obj.objects = self.objects[index] + # NOTE(danms): We must be mixed in with a RackObject! + new_obj.obj_reset_changes() + new_obj._context = self._context + return new_obj + return self.objects[index] + + def __contains__(self, value): + """List membership test.""" + return value in self.objects + + def count(self, value): + """List count of value occurrences.""" + return self.objects.count(value) + + def index(self, value): + """List index of value.""" + return self.objects.index(value) + + def sort(self, cmp=None, key=None, reverse=False): + self.objects.sort(cmp=cmp, key=key, reverse=reverse) + + def _attr_objects_to_primitive(self): + """Serialization of object list.""" + return [x.obj_to_primitive() for x in self.objects] + + def _attr_objects_from_primitive(self, value): + """Deserialization of object list.""" + objects = [] + for entity in value: + obj = RackObject.obj_from_primitive(entity, context=self._context) + objects.append(obj) + return objects + + def obj_make_compatible(self, primitive, target_version): + primitives = primitive['objects'] + child_target_version = self.child_versions.get(target_version, '1.0') + for index, item in enumerate(self.objects): + self.objects[index].obj_make_compatible( + primitives[index]['rack_object.data'], + child_target_version) + primitives[index]['rack_object.version'] = child_target_version + + def obj_what_changed(self): + changes = set(self._changed_fields) + for child in self.objects: + if child.obj_what_changed(): + changes.add('objects') + return changes + + +class RackObjectSerializer(messaging.NoOpSerializer): + + def _process_object(self, context, objprim): + try: + objinst = RackObject.obj_from_primitive(objprim, context=context) + except exception.IncompatibleObjectVersion as e: + objinst = self.conductor.object_backport(context, objprim, + e.kwargs['supported']) + return objinst + + def _process_iterable(self, context, action_fn, values): + """Process an iterable, taking an action on each value. 
+ :param:context: Request context + :param:action_fn: Action to take on each item in values + :param:values: Iterable container of things to take action on + :returns: A new container of the same type (except set) with + items from values having had action applied. + """ + iterable = values.__class__ + if iterable == set: + # NOTE(danms): A set can't have an unhashable value inside, such as + # a dict. Convert sets to tuples, which is fine, since we can't + # send them over RPC anyway. + iterable = tuple + return iterable([action_fn(context, value) for value in values]) + + def serialize_entity(self, context, entity): + if isinstance(entity, (tuple, list, set)): + entity = self._process_iterable(context, self.serialize_entity, + entity) + elif (hasattr(entity, 'obj_to_primitive') and + callable(entity.obj_to_primitive)): + entity = entity.obj_to_primitive() + return entity + + def deserialize_entity(self, context, entity): + if isinstance(entity, dict) and 'rack_object.name' in entity: + entity = self._process_object(context, entity) + elif isinstance(entity, (tuple, list, set)): + entity = self._process_iterable(context, self.deserialize_entity, + entity) + return entity + + +def obj_to_primitive(obj): + """Recursively turn an object into a python primitive. + + A RackObject becomes a dict, and anything that implements ObjectListBase + becomes a list. + """ + if isinstance(obj, ObjectListBase): + return [obj_to_primitive(x) for x in obj] + elif isinstance(obj, RackObject): + result = {} + for key, value in obj.iteritems(): + result[key] = obj_to_primitive(value) + return result + elif isinstance(obj, netaddr.IPAddress): + return str(obj) + elif isinstance(obj, netaddr.IPNetwork): + return str(obj) + else: + return obj + + +def obj_make_list(context, list_obj, item_cls, db_list, **extra_args): + """Construct an object list from a list of primitives. + + This calls item_cls._from_db_object() on each item of db_list, and + adds the resulting object to list_obj. + + :param:context: Request contextr + :param:list_obj: An ObjectListBase object + :param:item_cls: The RackObject class of the objects within the list + :param:db_list: The list of primitives to convert to objects + :param:extra_args: Extra arguments to pass to _from_db_object() + :returns: list_obj + """ + list_obj.objects = [] + for db_item in db_list: + item = item_cls._from_db_object(context, item_cls(), db_item, + **extra_args) + list_obj.objects.append(item) + list_obj._context = context + list_obj.obj_reset_changes() + return list_obj diff --git a/rack/openstack/__init__.py b/rack/openstack/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/rack/openstack/common/README b/rack/openstack/common/README new file mode 100644 index 0000000..0700c72 --- /dev/null +++ b/rack/openstack/common/README @@ -0,0 +1,13 @@ +openstack-common +---------------- + +A number of modules from openstack-common are imported into this project. + +These modules are "incubating" in openstack-common and are kept in sync +with the help of openstack-common's update.py script. See: + + https://wiki.openstack.org/wiki/Oslo#Syncing_Code_from_Incubator + +The copy of the code should never be directly modified here. Please +always update openstack-common first and then run the script to copy +the changes across. 
diff --git a/rack/openstack/common/__init__.py b/rack/openstack/common/__init__.py new file mode 100644 index 0000000..2a00f3b --- /dev/null +++ b/rack/openstack/common/__init__.py @@ -0,0 +1,2 @@ +import six +six.add_move(six.MovedModule('mox', 'mox', 'mox3.mox')) diff --git a/rack/openstack/common/cliutils.py b/rack/openstack/common/cliutils.py new file mode 100644 index 0000000..411bd58 --- /dev/null +++ b/rack/openstack/common/cliutils.py @@ -0,0 +1,63 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import inspect + + +class MissingArgs(Exception): + + def __init__(self, missing): + self.missing = missing + + def __str__(self): + if len(self.missing) == 1: + return "An argument is missing" + else: + return ("%(num)d arguments are missing" % + dict(num=len(self.missing))) + + +def validate_args(fn, *args, **kwargs): + """Check that the supplied args are sufficient for calling a function. + + >>> validate_args(lambda a: None) + Traceback (most recent call last): + ... + MissingArgs: An argument is missing + >>> validate_args(lambda a, b, c, d: None, 0, c=1) + Traceback (most recent call last): + ... + MissingArgs: 2 arguments are missing + + :param fn: the function to check + :param arg: the positional arguments supplied + :param kwargs: the keyword arguments supplied + """ + argspec = inspect.getargspec(fn) + + num_defaults = len(argspec.defaults or []) + required_args = argspec.args[:len(argspec.args) - num_defaults] + + def isbound(method): + return getattr(method, 'im_self', None) is not None + + if isbound(fn): + required_args.pop(0) + + missing = [arg for arg in required_args if arg not in kwargs] + missing = missing[len(args):] + if missing: + raise MissingArgs(missing) diff --git a/rack/openstack/common/config/__init__.py b/rack/openstack/common/config/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/rack/openstack/common/config/generator.py b/rack/openstack/common/config/generator.py new file mode 100644 index 0000000..335ddb6 --- /dev/null +++ b/rack/openstack/common/config/generator.py @@ -0,0 +1,302 @@ +# Copyright 2012 SINA Corporation +# Copyright 2014 Cisco Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +"""Extracts OpenStack config option info from module(s).""" + +from __future__ import print_function + +import argparse +import imp +import os +import re +import socket +import sys +import textwrap + +from oslo.config import cfg +import six +import stevedore.named + +from rack.openstack.common import gettextutils +from rack.openstack.common import importutils + +gettextutils.install('rack') + +STROPT = "StrOpt" +BOOLOPT = "BoolOpt" +INTOPT = "IntOpt" +FLOATOPT = "FloatOpt" +LISTOPT = "ListOpt" +DICTOPT = "DictOpt" +MULTISTROPT = "MultiStrOpt" + +OPT_TYPES = { + STROPT: 'string value', + BOOLOPT: 'boolean value', + INTOPT: 'integer value', + FLOATOPT: 'floating point value', + LISTOPT: 'list value', + DICTOPT: 'dict value', + MULTISTROPT: 'multi valued', +} + +OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT, + FLOATOPT, LISTOPT, DICTOPT, + MULTISTROPT])) + +PY_EXT = ".py" +BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), + "../../../../")) +WORDWRAP_WIDTH = 60 + + +def generate(argv): + parser = argparse.ArgumentParser( + description='generate sample configuration file', + ) + parser.add_argument('-m', dest='modules', action='append') + parser.add_argument('-l', dest='libraries', action='append') + parser.add_argument('srcfiles', nargs='*') + parsed_args = parser.parse_args(argv) + + mods_by_pkg = dict() + for filepath in parsed_args.srcfiles: + pkg_name = filepath.split(os.sep)[1] + mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]), + os.path.basename(filepath).split('.')[0]]) + mods_by_pkg.setdefault(pkg_name, list()).append(mod_str) + # NOTE(lzyeval): place top level modules before packages + pkg_names = sorted(pkg for pkg in mods_by_pkg if pkg.endswith(PY_EXT)) + ext_names = sorted(pkg for pkg in mods_by_pkg if pkg not in pkg_names) + pkg_names.extend(ext_names) + + # opts_by_group is a mapping of group name to an options list + # The options list is a list of (module, options) tuples + opts_by_group = {'DEFAULT': []} + + if parsed_args.modules: + for module_name in parsed_args.modules: + module = _import_module(module_name) + if module: + for group, opts in _list_opts(module): + opts_by_group.setdefault(group, []).append((module_name, + opts)) + + # Look for entry points defined in libraries (or applications) for + # option discovery, and include their return values in the output. + # + # Each entry point should be a function returning an iterable + # of pairs with the group name (or None for the default group) + # and the list of Opt instances for that group. 
+ if parsed_args.libraries: + loader = stevedore.named.NamedExtensionManager( + 'oslo.config.opts', + names=list(set(parsed_args.libraries)), + invoke_on_load=False, + ) + for ext in loader: + for group, opts in ext.plugin(): + opt_list = opts_by_group.setdefault(group or 'DEFAULT', []) + opt_list.append((ext.name, opts)) + + for pkg_name in pkg_names: + mods = mods_by_pkg.get(pkg_name) + mods.sort() + for mod_str in mods: + if mod_str.endswith('.__init__'): + mod_str = mod_str[:mod_str.rfind(".")] + + mod_obj = _import_module(mod_str) + if not mod_obj: + raise RuntimeError("Unable to import module %s" % mod_str) + + for group, opts in _list_opts(mod_obj): + opts_by_group.setdefault(group, []).append((mod_str, opts)) + + print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', [])) + for group in sorted(opts_by_group.keys()): + print_group_opts(group, opts_by_group[group]) + + +def _import_module(mod_str): + try: + if mod_str.startswith('bin.'): + imp.load_source(mod_str[4:], os.path.join('bin', mod_str[4:])) + return sys.modules[mod_str[4:]] + else: + return importutils.import_module(mod_str) + except Exception as e: + sys.stderr.write("Error importing module %s: %s\n" % (mod_str, str(e))) + return None + + +def _is_in_group(opt, group): + "Check if opt is in group." + for value in group._opts.values(): + # NOTE(llu): Temporary workaround for bug #1262148, wait until + # newly released oslo.config support '==' operator. + if not(value['opt'] != opt): + return True + return False + + +def _guess_groups(opt, mod_obj): + # is it in the DEFAULT group? + if _is_in_group(opt, cfg.CONF): + return 'DEFAULT' + + # what other groups is it in? + for value in cfg.CONF.values(): + if isinstance(value, cfg.CONF.GroupAttr): + if _is_in_group(opt, value._group): + return value._group.name + + raise RuntimeError( + "Unable to find group for option %s, " + "maybe it's defined twice in the same group?" + % opt.name + ) + + +def _list_opts(obj): + def is_opt(o): + return (isinstance(o, cfg.Opt) and + not isinstance(o, cfg.SubCommandOpt)) + + opts = list() + for attr_str in dir(obj): + attr_obj = getattr(obj, attr_str) + if is_opt(attr_obj): + opts.append(attr_obj) + elif (isinstance(attr_obj, list) and + all(map(lambda x: is_opt(x), attr_obj))): + opts.extend(attr_obj) + + ret = {} + for opt in opts: + ret.setdefault(_guess_groups(opt, obj), []).append(opt) + return ret.items() + + +def print_group_opts(group, opts_by_module): + print("[%s]" % group) + print('') + for mod, opts in opts_by_module: + print('#') + print('# Options defined in %s' % mod) + print('#') + print('') + for opt in opts: + _print_opt(opt) + print('') + + +def _get_my_ip(): + try: + csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + csock.connect(('8.8.8.8', 80)) + (addr, port) = csock.getsockname() + csock.close() + return addr + except socket.error: + return None + + +def _sanitize_default(name, value): + """Set up a reasonably sensible default for pybasedir, my_ip and host.""" + if value.startswith(sys.prefix): + # NOTE(jd) Don't use os.path.join, because it is likely to think the + # second part is an absolute pathname and therefore drop the first + # part. 
+ value = os.path.normpath("/usr/" + value[len(sys.prefix):]) + elif value.startswith(BASEDIR): + return value.replace(BASEDIR, '/usr/lib/python/site-packages') + elif BASEDIR in value: + return value.replace(BASEDIR, '') + elif value == _get_my_ip(): + return '10.0.0.1' + elif value in (socket.gethostname(), socket.getfqdn()) and 'host' in name: + return 'rack' + elif value.strip() != value: + return '"%s"' % value + return value + + +def _print_opt(opt): + opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help + if not opt_help: + sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name) + opt_help = "" + opt_type = None + try: + opt_type = OPTION_REGEX.search(str(type(opt))).group(0) + except (ValueError, AttributeError) as err: + sys.stderr.write("%s\n" % str(err)) + sys.exit(1) + opt_help = u'%s (%s)' % (opt_help, + OPT_TYPES[opt_type]) + print('#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH))) + if opt.deprecated_opts: + for deprecated_opt in opt.deprecated_opts: + if deprecated_opt.name: + deprecated_group = (deprecated_opt.group if + deprecated_opt.group else "DEFAULT") + print('# Deprecated group/name - [%s]/%s' % + (deprecated_group, + deprecated_opt.name)) + try: + if opt_default is None: + print('#%s=' % opt_name) + elif opt_type == STROPT: + assert(isinstance(opt_default, six.string_types)) + print('#%s=%s' % (opt_name, _sanitize_default(opt_name, + opt_default))) + elif opt_type == BOOLOPT: + assert(isinstance(opt_default, bool)) + print('#%s=%s' % (opt_name, str(opt_default).lower())) + elif opt_type == INTOPT: + assert(isinstance(opt_default, int) and + not isinstance(opt_default, bool)) + print('#%s=%s' % (opt_name, opt_default)) + elif opt_type == FLOATOPT: + assert(isinstance(opt_default, float)) + print('#%s=%s' % (opt_name, opt_default)) + elif opt_type == LISTOPT: + assert(isinstance(opt_default, list)) + print('#%s=%s' % (opt_name, ','.join(opt_default))) + elif opt_type == DICTOPT: + assert(isinstance(opt_default, dict)) + opt_default_strlist = [str(key) + ':' + str(value) + for (key, value) in opt_default.items()] + print('#%s=%s' % (opt_name, ','.join(opt_default_strlist))) + elif opt_type == MULTISTROPT: + assert(isinstance(opt_default, list)) + if not opt_default: + opt_default = [''] + for default in opt_default: + print('#%s=%s' % (opt_name, default)) + print('') + except Exception: + sys.stderr.write('Error in option "%s"\n' % opt_name) + sys.exit(1) + + +def main(): + generate(sys.argv[1:]) + +if __name__ == '__main__': + main() diff --git a/rack/openstack/common/context.py b/rack/openstack/common/context.py new file mode 100644 index 0000000..fe073d6 --- /dev/null +++ b/rack/openstack/common/context.py @@ -0,0 +1,83 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Simple class that stores security context information in the web request. 
+ +Projects should subclass this class if they wish to enhance the request +context or provide additional information in their specific WSGI pipeline. +""" + +import itertools + +from rack.openstack.common import uuidutils + + +def generate_request_id(): + return 'req-%s' % uuidutils.generate_uuid() + + +class RequestContext(object): + + """Helper class to represent useful information about a request context. + + Stores information about the security context under which the user + accesses the system, as well as additional request information. + """ + + def __init__(self, auth_token=None, user=None, tenant=None, is_admin=False, + read_only=False, show_deleted=False, request_id=None): + self.auth_token = auth_token + self.user = user + self.tenant = tenant + self.is_admin = is_admin + self.read_only = read_only + self.show_deleted = show_deleted + if not request_id: + request_id = generate_request_id() + self.request_id = request_id + + def to_dict(self): + return {'user': self.user, + 'tenant': self.tenant, + 'is_admin': self.is_admin, + 'read_only': self.read_only, + 'show_deleted': self.show_deleted, + 'auth_token': self.auth_token, + 'request_id': self.request_id} + + +def get_admin_context(show_deleted=False): + context = RequestContext(None, + tenant=None, + is_admin=True, + show_deleted=show_deleted) + return context + + +def get_context_from_function_and_args(function, args, kwargs): + """Find an arg of type RequestContext and return it. + + This is useful in a couple of decorators where we don't + know much about the function we're wrapping. + """ + + for arg in itertools.chain(kwargs.values(), args): + if isinstance(arg, RequestContext): + return arg + + return None diff --git a/rack/openstack/common/db/__init__.py b/rack/openstack/common/db/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/rack/openstack/common/db/api.py b/rack/openstack/common/db/api.py new file mode 100644 index 0000000..bcea694 --- /dev/null +++ b/rack/openstack/common/db/api.py @@ -0,0 +1,162 @@ +# Copyright (c) 2013 Rackspace Hosting +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Multiple DB API backend support. + +A DB backend module should implement a method named 'get_backend' which +takes no arguments. The method can return any object that implements DB +API methods. +""" + +import functools +import logging +import threading +import time + +from rack.openstack.common.db import exception +from rack.openstack.common.gettextutils import _LE +from rack.openstack.common import importutils + + +LOG = logging.getLogger(__name__) + + +def safe_for_db_retry(f): + """Enable db-retry for decorated function, if config option enabled.""" + f.__dict__['enable_retry'] = True + return f + + +class wrap_db_retry(object): + """Retry db.api methods, if DBConnectionError() raised + + Retry decorated db.api methods. If we enabled `use_db_reconnect` + in config, this decorator will be applied to all db.api functions, + marked with @safe_for_db_retry decorator. 
+ Decorator catchs DBConnectionError() and retries function in a + loop until it succeeds, or until maximum retries count will be reached. + """ + + def __init__(self, retry_interval, max_retries, inc_retry_interval, + max_retry_interval): + super(wrap_db_retry, self).__init__() + + self.retry_interval = retry_interval + self.max_retries = max_retries + self.inc_retry_interval = inc_retry_interval + self.max_retry_interval = max_retry_interval + + def __call__(self, f): + @functools.wraps(f) + def wrapper(*args, **kwargs): + next_interval = self.retry_interval + remaining = self.max_retries + + while True: + try: + return f(*args, **kwargs) + except exception.DBConnectionError as e: + if remaining == 0: + LOG.exception(_LE('DB exceeded retry limit.')) + raise exception.DBError(e) + if remaining != -1: + remaining -= 1 + LOG.exception(_LE('DB connection error.')) + # NOTE(vsergeyev): We are using patched time module, so + # this effectively yields the execution + # context to another green thread. + time.sleep(next_interval) + if self.inc_retry_interval: + next_interval = min( + next_interval * 2, + self.max_retry_interval + ) + return wrapper + + +class DBAPI(object): + def __init__(self, backend_name, backend_mapping=None, lazy=False, + **kwargs): + """Initialize the chosen DB API backend. + + :param backend_name: name of the backend to load + :type backend_name: str + + :param backend_mapping: backend name -> module/class to load mapping + :type backend_mapping: dict + + :param lazy: load the DB backend lazily on the first DB API method call + :type lazy: bool + + Keyword arguments: + + :keyword use_db_reconnect: retry DB transactions on disconnect or not + :type use_db_reconnect: bool + + :keyword retry_interval: seconds between transaction retries + :type retry_interval: int + + :keyword inc_retry_interval: increase retry interval or not + :type inc_retry_interval: bool + + :keyword max_retry_interval: max interval value between retries + :type max_retry_interval: int + + :keyword max_retries: max number of retries before an error is raised + :type max_retries: int + + """ + + self._backend = None + self._backend_name = backend_name + self._backend_mapping = backend_mapping or {} + self._lock = threading.Lock() + + if not lazy: + self._load_backend() + + self.use_db_reconnect = kwargs.get('use_db_reconnect', False) + self.retry_interval = kwargs.get('retry_interval', 1) + self.inc_retry_interval = kwargs.get('inc_retry_interval', True) + self.max_retry_interval = kwargs.get('max_retry_interval', 10) + self.max_retries = kwargs.get('max_retries', 20) + + def _load_backend(self): + with self._lock: + if not self._backend: + # Import the untranslated name if we don't have a mapping + backend_path = self._backend_mapping.get(self._backend_name, + self._backend_name) + backend_mod = importutils.import_module(backend_path) + self._backend = backend_mod.get_backend() + + def __getattr__(self, key): + if not self._backend: + self._load_backend() + + attr = getattr(self._backend, key) + if not hasattr(attr, '__call__'): + return attr + # NOTE(vsergeyev): If `use_db_reconnect` option is set to True, retry + # DB API methods, decorated with @safe_for_db_retry + # on disconnect. 
+ if self.use_db_reconnect and hasattr(attr, 'enable_retry'): + attr = wrap_db_retry( + retry_interval=self.retry_interval, + max_retries=self.max_retries, + inc_retry_interval=self.inc_retry_interval, + max_retry_interval=self.max_retry_interval)(attr) + + return attr diff --git a/rack/openstack/common/db/exception.py b/rack/openstack/common/db/exception.py new file mode 100644 index 0000000..e1d28e0 --- /dev/null +++ b/rack/openstack/common/db/exception.py @@ -0,0 +1,56 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""DB related custom exceptions.""" + +import six + +from rack.openstack.common.gettextutils import _ + + +class DBError(Exception): + """Wraps an implementation specific exception.""" + def __init__(self, inner_exception=None): + self.inner_exception = inner_exception + super(DBError, self).__init__(six.text_type(inner_exception)) + + +class DBDuplicateEntry(DBError): + """Wraps an implementation specific exception.""" + def __init__(self, columns=[], inner_exception=None): + self.columns = columns + super(DBDuplicateEntry, self).__init__(inner_exception) + + +class DBDeadlock(DBError): + def __init__(self, inner_exception=None): + super(DBDeadlock, self).__init__(inner_exception) + + +class DBInvalidUnicodeParameter(Exception): + message = _("Invalid Parameter: " + "Unicode is not supported by the current database.") + + +class DbMigrationError(DBError): + """Wraps migration specific exception.""" + def __init__(self, message=None): + super(DbMigrationError, self).__init__(message) + + +class DBConnectionError(DBError): + """Wraps connection specific exception.""" + pass diff --git a/rack/openstack/common/db/options.py b/rack/openstack/common/db/options.py new file mode 100644 index 0000000..9109774 --- /dev/null +++ b/rack/openstack/common/db/options.py @@ -0,0 +1,168 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import copy + +from oslo.config import cfg + + +database_opts = [ + cfg.StrOpt('sqlite_db', + deprecated_group='DEFAULT', + default='rack.sqlite', + help='The file name to use with SQLite'), + cfg.BoolOpt('sqlite_synchronous', + deprecated_group='DEFAULT', + default=True, + help='If True, SQLite uses synchronous mode'), + cfg.StrOpt('backend', + default='sqlalchemy', + deprecated_name='db_backend', + deprecated_group='DEFAULT', + help='The backend to use for db'), + cfg.StrOpt('connection', + help='The SQLAlchemy connection string used to connect to the ' + 'database', + secret=True, + deprecated_opts=[cfg.DeprecatedOpt('sql_connection', + group='DEFAULT'), + cfg.DeprecatedOpt('sql_connection', + group='DATABASE'), + cfg.DeprecatedOpt('connection', + group='sql'), ]), + cfg.StrOpt('mysql_sql_mode', + help='The SQL mode to be used for MySQL sessions ' + '(default is empty, meaning do not override ' + 'any server-side SQL mode setting)'), + cfg.IntOpt('idle_timeout', + default=3600, + deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout', + group='DEFAULT'), + cfg.DeprecatedOpt('sql_idle_timeout', + group='DATABASE'), + cfg.DeprecatedOpt('idle_timeout', + group='sql')], + help='Timeout before idle sql connections are reaped'), + cfg.IntOpt('min_pool_size', + default=1, + deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size', + group='DEFAULT'), + cfg.DeprecatedOpt('sql_min_pool_size', + group='DATABASE')], + help='Minimum number of SQL connections to keep open in a ' + 'pool'), + cfg.IntOpt('max_pool_size', + default=None, + deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size', + group='DEFAULT'), + cfg.DeprecatedOpt('sql_max_pool_size', + group='DATABASE')], + help='Maximum number of SQL connections to keep open in a ' + 'pool'), + cfg.IntOpt('max_retries', + default=10, + deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries', + group='DEFAULT'), + cfg.DeprecatedOpt('sql_max_retries', + group='DATABASE')], + help='Maximum db connection retries during startup. ' + '(setting -1 implies an infinite retry count)'), + cfg.IntOpt('retry_interval', + default=10, + deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval', + group='DEFAULT'), + cfg.DeprecatedOpt('reconnect_interval', + group='DATABASE')], + help='Interval between retries of opening a sql connection'), + cfg.IntOpt('max_overflow', + default=None, + deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow', + group='DEFAULT'), + cfg.DeprecatedOpt('sqlalchemy_max_overflow', + group='DATABASE')], + help='If set, use this value for max_overflow with sqlalchemy'), + cfg.IntOpt('connection_debug', + default=0, + deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug', + group='DEFAULT')], + help='Verbosity of SQL debugging information. 
0=None, ' + '100=Everything'), + cfg.BoolOpt('connection_trace', + default=False, + deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace', + group='DEFAULT')], + help='Add python stack traces to SQL as comment strings'), + cfg.IntOpt('pool_timeout', + default=None, + deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout', + group='DATABASE')], + help='If set, use this value for pool_timeout with sqlalchemy'), + cfg.BoolOpt('use_db_reconnect', + default=False, + help='Enable the experimental use of database reconnect ' + 'on connection lost'), + cfg.IntOpt('db_retry_interval', + default=1, + help='seconds between db connection retries'), + cfg.BoolOpt('db_inc_retry_interval', + default=True, + help='Whether to increase interval between db connection ' + 'retries, up to db_max_retry_interval'), + cfg.IntOpt('db_max_retry_interval', + default=10, + help='max seconds between db connection retries, if ' + 'db_inc_retry_interval is enabled'), + cfg.IntOpt('db_max_retries', + default=20, + help='maximum db connection retries before error is raised. ' + '(setting -1 implies an infinite retry count)'), +] + +CONF = cfg.CONF +CONF.register_opts(database_opts, 'database') + + +def set_defaults(sql_connection, sqlite_db, max_pool_size=None, + max_overflow=None, pool_timeout=None): + """Set defaults for configuration variables.""" + cfg.set_defaults(database_opts, + connection=sql_connection, + sqlite_db=sqlite_db) + # Update the QueuePool defaults + if max_pool_size is not None: + cfg.set_defaults(database_opts, + max_pool_size=max_pool_size) + if max_overflow is not None: + cfg.set_defaults(database_opts, + max_overflow=max_overflow) + if pool_timeout is not None: + cfg.set_defaults(database_opts, + pool_timeout=pool_timeout) + + +def list_opts(): + """Returns a list of oslo.config options available in the library. + + The returned list includes all oslo.config options which may be registered + at runtime by the library. + + Each element of the list is a tuple. The first element is the name of the + group under which the list of elements in the second element will be + registered. A group name of None corresponds to the [DEFAULT] group in + config files. + + The purpose of this is to allow tools like the Oslo sample config file + generator to discover the options exposed to users by this library. + + :returns: a list of (group_name, opts) tuples + """ + return [('database', copy.deepcopy(database_opts))] diff --git a/rack/openstack/common/db/sqlalchemy/__init__.py b/rack/openstack/common/db/sqlalchemy/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/rack/openstack/common/db/sqlalchemy/migration.py b/rack/openstack/common/db/sqlalchemy/migration.py new file mode 100644 index 0000000..6b63d0f --- /dev/null +++ b/rack/openstack/common/db/sqlalchemy/migration.py @@ -0,0 +1,268 @@ +# coding: utf-8 +# +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# Base on code in migrate/changeset/databases/sqlite.py which is under +# the following license: +# +# The MIT License +# +# Copyright (c) 2009 Evan Rosson, Jan Dittberner, Domen Kožar +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. + +import os +import re + +from migrate.changeset import ansisql +from migrate.changeset.databases import sqlite +from migrate import exceptions as versioning_exceptions +from migrate.versioning import api as versioning_api +from migrate.versioning.repository import Repository +import sqlalchemy +from sqlalchemy.schema import UniqueConstraint + +from rack.openstack.common.db import exception +from rack.openstack.common.gettextutils import _ + + +def _get_unique_constraints(self, table): + """Retrieve information about existing unique constraints of the table + + This feature is needed for _recreate_table() to work properly. + Unfortunately, it's not available in sqlalchemy 0.7.x/0.8.x. + + """ + + data = table.metadata.bind.execute( + """SELECT sql + FROM sqlite_master + WHERE + type='table' AND + name=:table_name""", + table_name=table.name + ).fetchone()[0] + + UNIQUE_PATTERN = "CONSTRAINT (\w+) UNIQUE \(([^\)]+)\)" + return [ + UniqueConstraint( + *[getattr(table.columns, c.strip(' "')) for c in cols.split(",")], + name=name + ) + for name, cols in re.findall(UNIQUE_PATTERN, data) + ] + + +def _recreate_table(self, table, column=None, delta=None, omit_uniques=None): + """Recreate the table properly + + Unlike the corresponding original method of sqlalchemy-migrate this one + doesn't drop existing unique constraints when creating a new one. 
+ + """ + + table_name = self.preparer.format_table(table) + + # we remove all indexes so as not to have + # problems during copy and re-create + for index in table.indexes: + index.drop() + + # reflect existing unique constraints + for uc in self._get_unique_constraints(table): + table.append_constraint(uc) + # omit given unique constraints when creating a new table if required + table.constraints = set([ + cons for cons in table.constraints + if omit_uniques is None or cons.name not in omit_uniques + ]) + + self.append('ALTER TABLE %s RENAME TO migration_tmp' % table_name) + self.execute() + + insertion_string = self._modify_table(table, column, delta) + + table.create(bind=self.connection) + self.append(insertion_string % {'table_name': table_name}) + self.execute() + self.append('DROP TABLE migration_tmp') + self.execute() + + +def _visit_migrate_unique_constraint(self, *p, **k): + """Drop the given unique constraint + + The corresponding original method of sqlalchemy-migrate just + raises NotImplemented error + + """ + + self.recreate_table(p[0].table, omit_uniques=[p[0].name]) + + +def patch_migrate(): + """A workaround for SQLite's inability to alter things + + SQLite abilities to alter tables are very limited (please read + http://www.sqlite.org/lang_altertable.html for more details). + E. g. one can't drop a column or a constraint in SQLite. The + workaround for this is to recreate the original table omitting + the corresponding constraint (or column). + + sqlalchemy-migrate library has recreate_table() method that + implements this workaround, but it does it wrong: + + - information about unique constraints of a table + is not retrieved. So if you have a table with one + unique constraint and a migration adding another one + you will end up with a table that has only the + latter unique constraint, and the former will be lost + + - dropping of unique constraints is not supported at all + + The proper way to fix this is to provide a pull-request to + sqlalchemy-migrate, but the project seems to be dead. So we + can go on with monkey-patching of the lib at least for now. + + """ + + # this patch is needed to ensure that recreate_table() doesn't drop + # existing unique constraints of the table when creating a new one + helper_cls = sqlite.SQLiteHelper + helper_cls.recreate_table = _recreate_table + helper_cls._get_unique_constraints = _get_unique_constraints + + # this patch is needed to be able to drop existing unique constraints + constraint_cls = sqlite.SQLiteConstraintDropper + constraint_cls.visit_migrate_unique_constraint = \ + _visit_migrate_unique_constraint + constraint_cls.__bases__ = (ansisql.ANSIColumnDropper, + sqlite.SQLiteConstraintGenerator) + + +def db_sync(engine, abs_path, version=None, init_version=0): + """Upgrade or downgrade a database. + + Function runs the upgrade() or downgrade() functions in change scripts. + + :param engine: SQLAlchemy engine instance for a given database + :param abs_path: Absolute path to migrate repository. + :param version: Database will upgrade/downgrade until this version. + If None - database will update to the latest + available version. 
+ :param init_version: Initial database version + """ + if version is not None: + try: + version = int(version) + except ValueError: + raise exception.DbMigrationError( + message=_("version should be an integer")) + + current_version = db_version(engine, abs_path, init_version) + repository = _find_migrate_repo(abs_path) + _db_schema_sanity_check(engine) + if version is None or version > current_version: + return versioning_api.upgrade(engine, repository, version) + else: + return versioning_api.downgrade(engine, repository, + version) + + +def _db_schema_sanity_check(engine): + """Ensure all database tables were created with required parameters. + + :param engine: SQLAlchemy engine instance for a given database + + """ + + if engine.name == 'mysql': + onlyutf8_sql = ('SELECT TABLE_NAME,TABLE_COLLATION ' + 'from information_schema.TABLES ' + 'where TABLE_SCHEMA=%s and ' + 'TABLE_COLLATION NOT LIKE "%%utf8%%"') + + table_names = [res[0] for res in engine.execute(onlyutf8_sql, + engine.url.database)] + if len(table_names) > 0: + raise ValueError(_('Tables "%s" have non utf8 collation, ' + 'please make sure all tables are CHARSET=utf8' + ) % ','.join(table_names)) + + +def db_version(engine, abs_path, init_version): + """Show the current version of the repository. + + :param engine: SQLAlchemy engine instance for a given database + :param abs_path: Absolute path to migrate repository + :param version: Initial database version + """ + repository = _find_migrate_repo(abs_path) + try: + return versioning_api.db_version(engine, repository) + except versioning_exceptions.DatabaseNotControlledError: + meta = sqlalchemy.MetaData() + meta.reflect(bind=engine) + tables = meta.tables + if len(tables) == 0 or 'alembic_version' in tables: + db_version_control(engine, abs_path, version=init_version) + return versioning_api.db_version(engine, repository) + else: + raise exception.DbMigrationError( + message=_( + "The database is not under version control, but has " + "tables. Please stamp the current version of the schema " + "manually.")) + + +def db_version_control(engine, abs_path, version=None): + """Mark a database as under this repository's version control. + + Once a database is under version control, schema changes should + only be done via change scripts in this repository. + + :param engine: SQLAlchemy engine instance for a given database + :param abs_path: Absolute path to migrate repository + :param version: Initial database version + """ + repository = _find_migrate_repo(abs_path) + versioning_api.version_control(engine, repository, version) + return version + + +def _find_migrate_repo(abs_path): + """Get the project's change script repository + + :param abs_path: Absolute path to migrate repository + """ + if not os.path.exists(abs_path): + raise exception.DbMigrationError("Path %s not found" % abs_path) + return Repository(abs_path) diff --git a/rack/openstack/common/db/sqlalchemy/models.py b/rack/openstack/common/db/sqlalchemy/models.py new file mode 100644 index 0000000..4a8c9f6 --- /dev/null +++ b/rack/openstack/common/db/sqlalchemy/models.py @@ -0,0 +1,115 @@ +# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Piston Cloud Computing, Inc. +# Copyright 2012 Cloudscaling Group, Inc. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +SQLAlchemy models. +""" + +import six + +from sqlalchemy import Column, Integer +from sqlalchemy import DateTime +from sqlalchemy.orm import object_mapper + +from rack.openstack.common import timeutils + + +class ModelBase(object): + """Base class for models.""" + __table_initialized__ = False + + def save(self, session): + """Save this object.""" + + # NOTE(boris-42): This part of code should be look like: + # session.add(self) + # session.flush() + # But there is a bug in sqlalchemy and eventlet that + # raises NoneType exception if there is no running + # transaction and rollback is called. As long as + # sqlalchemy has this bug we have to create transaction + # explicitly. + with session.begin(subtransactions=True): + session.add(self) + session.flush() + + def __setitem__(self, key, value): + setattr(self, key, value) + + def __getitem__(self, key): + return getattr(self, key) + + def get(self, key, default=None): + return getattr(self, key, default) + + @property + def _extra_keys(self): + """Specifies custom fields + + Subclasses can override this property to return a list + of custom fields that should be included in their dict + representation. + + For reference check tests/db/sqlalchemy/test_models.py + """ + return [] + + def __iter__(self): + columns = dict(object_mapper(self).columns).keys() + # NOTE(russellb): Allow models to specify other keys that can be looked + # up, beyond the actual db columns. An example would be the 'name' + # property for an Instance. + columns.extend(self._extra_keys) + self._i = iter(columns) + return self + + def next(self): + n = six.advance_iterator(self._i) + return n, getattr(self, n) + + def update(self, values): + """Make the model object behave like a dict.""" + for k, v in six.iteritems(values): + setattr(self, k, v) + + def iteritems(self): + """Make the model object behave like a dict. + + Includes attributes from joins. + """ + local = dict(self) + joined = dict([(k, v) for k, v in six.iteritems(self.__dict__) + if not k[0] == '_']) + local.update(joined) + return six.iteritems(local) + + +class TimestampMixin(object): + created_at = Column(DateTime, default=lambda: timeutils.utcnow()) + updated_at = Column(DateTime, onupdate=lambda: timeutils.utcnow()) + + +class SoftDeleteMixin(object): + deleted_at = Column(DateTime) + deleted = Column(Integer, default=0) + + def soft_delete(self, session): + """Mark this object as deleted.""" + self.deleted = self.id + self.deleted_at = timeutils.utcnow() + self.save(session=session) diff --git a/rack/openstack/common/db/sqlalchemy/provision.py b/rack/openstack/common/db/sqlalchemy/provision.py new file mode 100644 index 0000000..14f8020 --- /dev/null +++ b/rack/openstack/common/db/sqlalchemy/provision.py @@ -0,0 +1,187 @@ +# Copyright 2013 Mirantis.inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Provision test environment for specific DB backends""" + +import argparse +import os +import random +import string + +from six import moves +import sqlalchemy + +from rack.openstack.common.db import exception as exc + + +SQL_CONNECTION = os.getenv('OS_TEST_DBAPI_ADMIN_CONNECTION', 'sqlite://') + + +def _gen_credentials(*names): + """Generate credentials.""" + auth_dict = {} + for name in names: + val = ''.join(random.choice(string.ascii_lowercase) + for i in moves.range(10)) + auth_dict[name] = val + return auth_dict + + +def _get_engine(uri=SQL_CONNECTION): + """Engine creation + + By default the uri is SQL_CONNECTION which is admin credentials. + Call the function without arguments to get admin connection. Admin + connection required to create temporary user and database for each + particular test. Otherwise use existing connection to recreate connection + to the temporary database. + """ + return sqlalchemy.create_engine(uri, poolclass=sqlalchemy.pool.NullPool) + + +def _execute_sql(engine, sql, driver): + """Initialize connection, execute sql query and close it.""" + try: + with engine.connect() as conn: + if driver == 'postgresql': + conn.connection.set_isolation_level(0) + for s in sql: + conn.execute(s) + except sqlalchemy.exc.OperationalError: + msg = ('%s does not match database admin ' + 'credentials or database does not exist.') + raise exc.DBConnectionError(msg % SQL_CONNECTION) + + +def create_database(engine): + """Provide temporary user and database for each particular test.""" + driver = engine.name + + auth = _gen_credentials('database', 'user', 'passwd') + + sqls = { + 'mysql': [ + "drop database if exists %(database)s;", + "grant all on %(database)s.* to '%(user)s'@'localhost'" + " identified by '%(passwd)s';", + "create database %(database)s;", + ], + 'postgresql': [ + "drop database if exists %(database)s;", + "drop user if exists %(user)s;", + "create user %(user)s with password '%(passwd)s';", + "create database %(database)s owner %(user)s;", + ] + } + + if driver == 'sqlite': + return 'sqlite:////tmp/%s' % auth['database'] + + try: + sql_rows = sqls[driver] + except KeyError: + raise ValueError('Unsupported RDBMS %s' % driver) + sql_query = map(lambda x: x % auth, sql_rows) + + _execute_sql(engine, sql_query, driver) + + params = auth.copy() + params['backend'] = driver + return "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" % params + + +def drop_database(engine, current_uri): + """Drop temporary database and user after each particular test.""" + engine = _get_engine(current_uri) + admin_engine = _get_engine() + driver = engine.name + auth = {'database': engine.url.database, 'user': engine.url.username} + + if driver == 'sqlite': + try: + os.remove(auth['database']) + except OSError: + pass + return + + sqls = { + 'mysql': [ + "drop database if exists %(database)s;", + "drop user '%(user)s'@'localhost';", + ], + 'postgresql': [ + "drop database if exists %(database)s;", + "drop user if exists %(user)s;", + ] + } + + try: + sql_rows = sqls[driver] + except KeyError: + raise ValueError('Unsupported RDBMS %s' % driver) + sql_query = 
map(lambda x: x % auth, sql_rows) + + _execute_sql(admin_engine, sql_query, driver) + + +def main(): + """Controller to handle commands + + ::create: Create test user and database with random names. + ::drop: Drop user and database created by previous command. + """ + parser = argparse.ArgumentParser( + description='Controller to handle database creation and dropping' + ' commands.', + epilog='Under normal circumstances is not used directly.' + ' Used in .testr.conf to automate test database creation' + ' and dropping processes.') + subparsers = parser.add_subparsers( + help='Subcommands to manipulate temporary test databases.') + + create = subparsers.add_parser( + 'create', + help='Create temporary test ' + 'databases and users.') + create.set_defaults(which='create') + create.add_argument( + 'instances_count', + type=int, + help='Number of databases to create.') + + drop = subparsers.add_parser( + 'drop', + help='Drop temporary test databases and users.') + drop.set_defaults(which='drop') + drop.add_argument( + 'instances', + nargs='+', + help='List of databases uri to be dropped.') + + args = parser.parse_args() + + engine = _get_engine() + which = args.which + + if which == "create": + for i in range(int(args.instances_count)): + print(create_database(engine)) + elif which == "drop": + for db in args.instances: + drop_database(engine, db) + + +if __name__ == "__main__": + main() diff --git a/rack/openstack/common/db/sqlalchemy/session.py b/rack/openstack/common/db/sqlalchemy/session.py new file mode 100644 index 0000000..3ec9bd8 --- /dev/null +++ b/rack/openstack/common/db/sqlalchemy/session.py @@ -0,0 +1,860 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Session Handling for SQLAlchemy backend. + +Recommended ways to use sessions within this framework: + +* Don't use them explicitly; this is like running with ``AUTOCOMMIT=1``. + `model_query()` will implicitly use a session when called without one + supplied. This is the ideal situation because it will allow queries + to be automatically retried if the database connection is interrupted. + + .. note:: Automatic retry will be enabled in a future patch. + + It is generally fine to issue several queries in a row like this. Even though + they may be run in separate transactions and/or separate sessions, each one + will see the data from the prior calls. If needed, undo- or rollback-like + functionality should be handled at a logical level. For an example, look at + the code around quotas and `reservation_rollback()`. + + Examples: + + .. code:: python + + def get_foo(context, foo): + return (model_query(context, models.Foo). + filter_by(foo=foo). + first()) + + def update_foo(context, id, newfoo): + (model_query(context, models.Foo). + filter_by(id=id). 
+ update({'foo': newfoo})) + + def create_foo(context, values): + foo_ref = models.Foo() + foo_ref.update(values) + foo_ref.save() + return foo_ref + + +* Within the scope of a single method, keep all the reads and writes within + the context managed by a single session. In this way, the session's + `__exit__` handler will take care of calling `flush()` and `commit()` for + you. If using this approach, you should not explicitly call `flush()` or + `commit()`. Any error within the context of the session will cause the + session to emit a `ROLLBACK`. Database errors like `IntegrityError` will be + raised in `session`'s `__exit__` handler, and any try/except within the + context managed by `session` will not be triggered. And catching other + non-database errors in the session will not trigger the ROLLBACK, so + exception handlers should always be outside the session, unless the + developer wants to do a partial commit on purpose. If the connection is + dropped before this is possible, the database will implicitly roll back the + transaction. + + .. note:: Statements in the session scope will not be automatically retried. + + If you create models within the session, they need to be added, but you + do not need to call `model.save()`: + + .. code:: python + + def create_many_foo(context, foos): + session = sessionmaker() + with session.begin(): + for foo in foos: + foo_ref = models.Foo() + foo_ref.update(foo) + session.add(foo_ref) + + def update_bar(context, foo_id, newbar): + session = sessionmaker() + with session.begin(): + foo_ref = (model_query(context, models.Foo, session). + filter_by(id=foo_id). + first()) + (model_query(context, models.Bar, session). + filter_by(id=foo_ref['bar_id']). + update({'bar': newbar})) + + .. note:: `update_bar` is a trivially simple example of using + ``with session.begin``. Whereas `create_many_foo` is a good example of + when a transaction is needed, it is always best to use as few queries as + possible. + + The two queries in `update_bar` can be better expressed using a single query + which avoids the need for an explicit transaction. It can be expressed like + so: + + .. code:: python + + def update_bar(context, foo_id, newbar): + subq = (model_query(context, models.Foo.id). + filter_by(id=foo_id). + limit(1). + subquery()) + (model_query(context, models.Bar). + filter_by(id=subq.as_scalar()). + update({'bar': newbar})) + + For reference, this emits approximately the following SQL statement: + + .. code:: sql + + UPDATE bar SET bar = ${newbar} + WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1); + + .. note:: `create_duplicate_foo` is a trivially simple example of catching an + exception while using ``with session.begin``. Here create two duplicate + instances with same primary key, must catch the exception out of context + managed by a single session: + + .. code:: python + + def create_duplicate_foo(context): + foo1 = models.Foo() + foo2 = models.Foo() + foo1.id = foo2.id = 1 + session = sessionmaker() + try: + with session.begin(): + session.add(foo1) + session.add(foo2) + except exception.DBDuplicateEntry as e: + handle_error(e) + +* Passing an active session between methods. Sessions should only be passed + to private methods. The private method must use a subtransaction; otherwise + SQLAlchemy will throw an error when you call `session.begin()` on an existing + transaction. Public methods should not accept a session parameter and should + not be involved in sessions within the caller's scope. 
+ + Note that this incurs more overhead in SQLAlchemy than the above means + due to nesting transactions, and it is not possible to implicitly retry + failed database operations when using this approach. + + This also makes code somewhat more difficult to read and debug, because a + single database transaction spans more than one method. Error handling + becomes less clear in this situation. When this is needed for code clarity, + it should be clearly documented. + + .. code:: python + + def myfunc(foo): + session = sessionmaker() + with session.begin(): + # do some database things + bar = _private_func(foo, session) + return bar + + def _private_func(foo, session=None): + if not session: + session = sessionmaker() + with session.begin(subtransaction=True): + # do some other database things + return bar + + +There are some things which it is best to avoid: + +* Don't keep a transaction open any longer than necessary. + + This means that your ``with session.begin()`` block should be as short + as possible, while still containing all the related calls for that + transaction. + +* Avoid ``with_lockmode('UPDATE')`` when possible. + + In MySQL/InnoDB, when a ``SELECT ... FOR UPDATE`` query does not match + any rows, it will take a gap-lock. This is a form of write-lock on the + "gap" where no rows exist, and prevents any other writes to that space. + This can effectively prevent any INSERT into a table by locking the gap + at the end of the index. Similar problems will occur if the SELECT FOR UPDATE + has an overly broad WHERE clause, or doesn't properly use an index. + + One idea proposed at ODS Fall '12 was to use a normal SELECT to test the + number of rows matching a query, and if only one row is returned, + then issue the SELECT FOR UPDATE. + + The better long-term solution is to use + ``INSERT .. ON DUPLICATE KEY UPDATE``. + However, this can not be done until the "deleted" columns are removed and + proper UNIQUE constraints are added to the tables. + + +Enabling soft deletes: + +* To use/enable soft-deletes, the `SoftDeleteMixin` must be added + to your model class. For example: + + .. code:: python + + class NovaBase(models.SoftDeleteMixin, models.ModelBase): + pass + + +Efficient use of soft deletes: + +* There are two possible ways to mark a record as deleted: + `model.soft_delete()` and `query.soft_delete()`. + + The `model.soft_delete()` method works with a single already-fetched entry. + `query.soft_delete()` makes only one db request for all entries that + correspond to the query. + +* In almost all cases you should use `query.soft_delete()`. Some examples: + + .. code:: python + + def soft_delete_bar(): + count = model_query(BarModel).find(some_condition).soft_delete() + if count == 0: + raise Exception("0 entries were soft deleted") + + def complex_soft_delete_with_synchronization_bar(session=None): + if session is None: + session = sessionmaker() + with session.begin(subtransactions=True): + count = (model_query(BarModel). + find(some_condition). + soft_delete(synchronize_session=True)) + # Here synchronize_session is required, because we + # don't know what is going on in outer session. + if count == 0: + raise Exception("0 entries were soft deleted") + +* There is only one situation where `model.soft_delete()` is appropriate: when + you fetch a single record, work with it, and mark it as deleted in the same + transaction. + + .. 
code:: python + + def soft_delete_bar_model(): + session = sessionmaker() + with session.begin(): + bar_ref = model_query(BarModel).find(some_condition).first() + # Work with bar_ref + bar_ref.soft_delete(session=session) + + However, if you need to work with all entries that correspond to query and + then soft delete them you should use the `query.soft_delete()` method: + + .. code:: python + + def soft_delete_multi_models(): + session = sessionmaker() + with session.begin(): + query = (model_query(BarModel, session=session). + find(some_condition)) + model_refs = query.all() + # Work with model_refs + query.soft_delete(synchronize_session=False) + # synchronize_session=False should be set if there is no outer + # session and these entries are not used after this. + + When working with many rows, it is very important to use query.soft_delete, + which issues a single query. Using `model.soft_delete()`, as in the following + example, is very inefficient. + + .. code:: python + + for bar_ref in bar_refs: + bar_ref.soft_delete(session=session) + # This will produce count(bar_refs) db requests. + +""" + +import functools +import logging +import re +import time + +import six +from sqlalchemy import exc as sqla_exc +from sqlalchemy.interfaces import PoolListener +import sqlalchemy.orm +from sqlalchemy.pool import NullPool, StaticPool +from sqlalchemy.sql.expression import literal_column + +from rack.openstack.common.db import exception +from rack.openstack.common.gettextutils import _LE, _LW, _LI +from rack.openstack.common import timeutils + + +LOG = logging.getLogger(__name__) + + +class SqliteForeignKeysListener(PoolListener): + """Ensures that the foreign key constraints are enforced in SQLite. + + The foreign key constraints are disabled by default in SQLite, + so the foreign key constraints will be enabled here for every + database connection + """ + def connect(self, dbapi_con, con_record): + dbapi_con.execute('pragma foreign_keys=ON') + + +# note(boris-42): In current versions of DB backends unique constraint +# violation messages follow the structure: +# +# sqlite: +# 1 column - (IntegrityError) column c1 is not unique +# N columns - (IntegrityError) column c1, c2, ..., N are not unique +# +# sqlite since 3.7.16: +# 1 column - (IntegrityError) UNIQUE constraint failed: tbl.k1 +# +# N columns - (IntegrityError) UNIQUE constraint failed: tbl.k1, tbl.k2 +# +# postgres: +# 1 column - (IntegrityError) duplicate key value violates unique +# constraint "users_c1_key" +# N columns - (IntegrityError) duplicate key value violates unique +# constraint "name_of_our_constraint" +# +# mysql: +# 1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key +# 'c1'") +# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined +# with -' for key 'name_of_our_constraint'") +# +# ibm_db_sa: +# N columns - (IntegrityError) SQL0803N One or more values in the INSERT +# statement, UPDATE statement, or foreign key update caused by a +# DELETE statement are not valid because the primary key, unique +# constraint or unique index identified by "2" constrains table +# "NOVA.KEY_PAIRS" from having duplicate values for the index +# key. 
+_DUP_KEY_RE_DB = { + "sqlite": (re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"), + re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")), + "postgresql": (re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),), + "mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),), + "ibm_db_sa": (re.compile(r"^.*SQL0803N.*$"),), +} + + +def _raise_if_duplicate_entry_error(integrity_error, engine_name): + """Raise exception if two entries are duplicated. + + In this function will be raised DBDuplicateEntry exception if integrity + error wrap unique constraint violation. + """ + + def get_columns_from_uniq_cons_or_name(columns): + # note(vsergeyev): UniqueConstraint name convention: "uniq_t0c10c2" + # where `t` it is table name and columns `c1`, `c2` + # are in UniqueConstraint. + uniqbase = "uniq_" + if not columns.startswith(uniqbase): + if engine_name == "postgresql": + return [columns[columns.index("_") + 1:columns.rindex("_")]] + return [columns] + return columns[len(uniqbase):].split("0")[1:] + + if engine_name not in ["ibm_db_sa", "mysql", "sqlite", "postgresql"]: + return + + # FIXME(johannes): The usage of the .message attribute has been + # deprecated since Python 2.6. However, the exceptions raised by + # SQLAlchemy can differ when using unicode() and accessing .message. + # An audit across all three supported engines will be necessary to + # ensure there are no regressions. + for pattern in _DUP_KEY_RE_DB[engine_name]: + match = pattern.match(integrity_error.message) + if match: + break + else: + return + + # NOTE(mriedem): The ibm_db_sa integrity error message doesn't provide the + # columns so we have to omit that from the DBDuplicateEntry error. + columns = '' + + if engine_name != 'ibm_db_sa': + columns = match.group(1) + + if engine_name == "sqlite": + columns = [c.split('.')[-1] for c in columns.strip().split(", ")] + else: + columns = get_columns_from_uniq_cons_or_name(columns) + raise exception.DBDuplicateEntry(columns, integrity_error) + + +# NOTE(comstud): In current versions of DB backends, Deadlock violation +# messages follow the structure: +# +# mysql: +# (OperationalError) (1213, 'Deadlock found when trying to get lock; try ' +# 'restarting transaction') +_DEADLOCK_RE_DB = { + "mysql": re.compile(r"^.*\(1213, 'Deadlock.*") +} + + +def _raise_if_deadlock_error(operational_error, engine_name): + """Raise exception on deadlock condition. + + Raise DBDeadlock exception if OperationalError contains a Deadlock + condition. + """ + re = _DEADLOCK_RE_DB.get(engine_name) + if re is None: + return + # FIXME(johannes): The usage of the .message attribute has been + # deprecated since Python 2.6. However, the exceptions raised by + # SQLAlchemy can differ when using unicode() and accessing .message. + # An audit across all three supported engines will be necessary to + # ensure there are no regressions. 
+ m = re.match(operational_error.message) + if not m: + return + raise exception.DBDeadlock(operational_error) + + +def _wrap_db_error(f): + #TODO(rpodolyaka): in a subsequent commit make this a class decorator to + # ensure it can only applied to Session subclasses instances (as we use + # Session instance bind attribute below) + + @functools.wraps(f) + def _wrap(self, *args, **kwargs): + try: + return f(self, *args, **kwargs) + except UnicodeEncodeError: + raise exception.DBInvalidUnicodeParameter() + except sqla_exc.OperationalError as e: + _raise_if_db_connection_lost(e, self.bind) + _raise_if_deadlock_error(e, self.bind.dialect.name) + # NOTE(comstud): A lot of code is checking for OperationalError + # so let's not wrap it for now. + raise + # note(boris-42): We should catch unique constraint violation and + # wrap it by our own DBDuplicateEntry exception. Unique constraint + # violation is wrapped by IntegrityError. + except sqla_exc.IntegrityError as e: + # note(boris-42): SqlAlchemy doesn't unify errors from different + # DBs so we must do this. Also in some tables (for example + # instance_types) there are more than one unique constraint. This + # means we should get names of columns, which values violate + # unique constraint, from error message. + _raise_if_duplicate_entry_error(e, self.bind.dialect.name) + raise exception.DBError(e) + except Exception as e: + LOG.exception(_LE('DB exception wrapped.')) + raise exception.DBError(e) + return _wrap + + +def _synchronous_switch_listener(dbapi_conn, connection_rec): + """Switch sqlite connections to non-synchronous mode.""" + dbapi_conn.execute("PRAGMA synchronous = OFF") + + +def _add_regexp_listener(dbapi_con, con_record): + """Add REGEXP function to sqlite connections.""" + + def regexp(expr, item): + reg = re.compile(expr) + return reg.search(six.text_type(item)) is not None + dbapi_con.create_function('regexp', 2, regexp) + + +def _thread_yield(dbapi_con, con_record): + """Ensure other greenthreads get a chance to be executed. + + If we use eventlet.monkey_patch(), eventlet.greenthread.sleep(0) will + execute instead of time.sleep(0). + Force a context switch. With common database backends (eg MySQLdb and + sqlite), there is no implicit yield caused by network I/O since they are + implemented by C libraries that eventlet cannot monkey patch. + """ + time.sleep(0) + + +def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy): + """Ensures that MySQL and DB2 connections are alive. + + Borrowed from: + http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f + """ + cursor = dbapi_conn.cursor() + try: + ping_sql = 'select 1' + if engine.name == 'ibm_db_sa': + # DB2 requires a table expression + ping_sql = 'select 1 from (values (1)) AS t1' + cursor.execute(ping_sql) + except Exception as ex: + if engine.dialect.is_disconnect(ex, dbapi_conn, cursor): + msg = _LW('Database server has gone away: %s') % ex + LOG.warning(msg) + raise sqla_exc.DisconnectionError(msg) + else: + raise + + +def _set_mode_traditional(dbapi_con, connection_rec, connection_proxy): + """Set engine mode to 'traditional'. + + Required to prevent silent truncates at insert or update operations + under MySQL. By default MySQL truncates inserted string if it longer + than a declared field just with warning. That is fraught with data + corruption. 
+ """ + _set_session_sql_mode(dbapi_con, connection_rec, + connection_proxy, 'TRADITIONAL') + + +def _set_session_sql_mode(dbapi_con, connection_rec, + connection_proxy, sql_mode=None): + """Set the sql_mode session variable. + + MySQL supports several server modes. The default is None, but sessions + may choose to enable server modes like TRADITIONAL, ANSI, + several STRICT_* modes and others. + + Note: passing in '' (empty string) for sql_mode clears + the SQL mode for the session, overriding a potentially set + server default. Passing in None (the default) makes this + a no-op, meaning if a server-side SQL mode is set, it still applies. + """ + cursor = dbapi_con.cursor() + if sql_mode is not None: + cursor.execute("SET SESSION sql_mode = %s", [sql_mode]) + + # Check against the real effective SQL mode. Even when unset by + # our own config, the server may still be operating in a specific + # SQL mode as set by the server configuration + cursor.execute("SHOW VARIABLES LIKE 'sql_mode'") + row = cursor.fetchone() + if row is None: + LOG.warning(_LW('Unable to detect effective SQL mode')) + return + realmode = row[1] + LOG.info(_LI('MySQL server mode set to %s') % realmode) + # 'TRADITIONAL' mode enables several other modes, so + # we need a substring match here + if not ('TRADITIONAL' in realmode.upper() or + 'STRICT_ALL_TABLES' in realmode.upper()): + LOG.warning(_LW("MySQL SQL mode is '%s', " + "consider enabling TRADITIONAL or STRICT_ALL_TABLES") + % realmode) + + +def _is_db_connection_error(args): + """Return True if error in connecting to db.""" + # NOTE(adam_g): This is currently MySQL specific and needs to be extended + # to support Postgres and others. + # For the db2, the error code is -30081 since the db2 is still not ready + conn_err_codes = ('2002', '2003', '2006', '2013', '-30081') + for err_code in conn_err_codes: + if args.find(err_code) != -1: + return True + return False + + +def _raise_if_db_connection_lost(error, engine): + # NOTE(vsergeyev): Function is_disconnect(e, connection, cursor) + # requires connection and cursor in incoming parameters, + # but we have no possibility to create connection if DB + # is not available, so in such case reconnect fails. + # But is_disconnect() ignores these parameters, so it + # makes sense to pass to function None as placeholder + # instead of connection and cursor. 
+ if engine.dialect.is_disconnect(error, None, None): + raise exception.DBConnectionError(error) + + +def create_engine(sql_connection, sqlite_fk=False, mysql_sql_mode=None, + mysql_traditional_mode=False, idle_timeout=3600, + connection_debug=0, max_pool_size=None, max_overflow=None, + pool_timeout=None, sqlite_synchronous=True, + connection_trace=False, max_retries=10, retry_interval=10): + """Return a new SQLAlchemy engine.""" + + connection_dict = sqlalchemy.engine.url.make_url(sql_connection) + + engine_args = { + "pool_recycle": idle_timeout, + 'convert_unicode': True, + } + + logger = logging.getLogger('sqlalchemy.engine') + + # Map SQL debug level to Python log level + if connection_debug >= 100: + logger.setLevel(logging.DEBUG) + elif connection_debug >= 50: + logger.setLevel(logging.INFO) + else: + logger.setLevel(logging.WARNING) + + if "sqlite" in connection_dict.drivername: + if sqlite_fk: + engine_args["listeners"] = [SqliteForeignKeysListener()] + engine_args["poolclass"] = NullPool + + if sql_connection == "sqlite://": + engine_args["poolclass"] = StaticPool + engine_args["connect_args"] = {'check_same_thread': False} + else: + if max_pool_size is not None: + engine_args['pool_size'] = max_pool_size + if max_overflow is not None: + engine_args['max_overflow'] = max_overflow + if pool_timeout is not None: + engine_args['pool_timeout'] = pool_timeout + + engine = sqlalchemy.create_engine(sql_connection, **engine_args) + + sqlalchemy.event.listen(engine, 'checkin', _thread_yield) + + if engine.name in ['mysql', 'ibm_db_sa']: + ping_callback = functools.partial(_ping_listener, engine) + sqlalchemy.event.listen(engine, 'checkout', ping_callback) + if engine.name == 'mysql': + if mysql_traditional_mode: + mysql_sql_mode = 'TRADITIONAL' + if mysql_sql_mode: + mode_callback = functools.partial(_set_session_sql_mode, + sql_mode=mysql_sql_mode) + sqlalchemy.event.listen(engine, 'checkout', mode_callback) + elif 'sqlite' in connection_dict.drivername: + if not sqlite_synchronous: + sqlalchemy.event.listen(engine, 'connect', + _synchronous_switch_listener) + sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener) + + if connection_trace and engine.dialect.dbapi.__name__ == 'MySQLdb': + _patch_mysqldb_with_stacktrace_comments() + + try: + engine.connect() + except sqla_exc.OperationalError as e: + if not _is_db_connection_error(e.args[0]): + raise + + remaining = max_retries + if remaining == -1: + remaining = 'infinite' + while True: + msg = _LW('SQL connection failed. 
%s attempts left.') + LOG.warning(msg % remaining) + if remaining != 'infinite': + remaining -= 1 + time.sleep(retry_interval) + try: + engine.connect() + break + except sqla_exc.OperationalError as e: + if (remaining != 'infinite' and remaining == 0) or \ + not _is_db_connection_error(e.args[0]): + raise + return engine + + +class Query(sqlalchemy.orm.query.Query): + """Subclass of sqlalchemy.query with soft_delete() method.""" + def soft_delete(self, synchronize_session='evaluate'): + return self.update({'deleted': literal_column('id'), + 'updated_at': literal_column('updated_at'), + 'deleted_at': timeutils.utcnow()}, + synchronize_session=synchronize_session) + + +class Session(sqlalchemy.orm.session.Session): + """Custom Session class to avoid SqlAlchemy Session monkey patching.""" + @_wrap_db_error + def query(self, *args, **kwargs): + return super(Session, self).query(*args, **kwargs) + + @_wrap_db_error + def flush(self, *args, **kwargs): + return super(Session, self).flush(*args, **kwargs) + + @_wrap_db_error + def execute(self, *args, **kwargs): + return super(Session, self).execute(*args, **kwargs) + + +def get_maker(engine, autocommit=True, expire_on_commit=False): + """Return a SQLAlchemy sessionmaker using the given engine.""" + return sqlalchemy.orm.sessionmaker(bind=engine, + class_=Session, + autocommit=autocommit, + expire_on_commit=expire_on_commit, + query_cls=Query) + + +def _patch_mysqldb_with_stacktrace_comments(): + """Adds current stack trace as a comment in queries. + + Patches MySQLdb.cursors.BaseCursor._do_query. + """ + import MySQLdb.cursors + import traceback + + old_mysql_do_query = MySQLdb.cursors.BaseCursor._do_query + + def _do_query(self, q): + stack = '' + for filename, line, method, function in traceback.extract_stack(): + # exclude various common things from trace + if filename.endswith('session.py') and method == '_do_query': + continue + if filename.endswith('api.py') and method == 'wrapper': + continue + if filename.endswith('utils.py') and method == '_inner': + continue + if filename.endswith('exception.py') and method == '_wrap': + continue + # db/api is just a wrapper around db/sqlalchemy/api + if filename.endswith('db/api.py'): + continue + # only trace inside rack + index = filename.rfind('rack') + if index == -1: + continue + stack += "File:%s:%s Method:%s() Line:%s | " \ + % (filename[index:], line, method, function) + + # strip trailing " | " from stack + if stack: + stack = stack[:-3] + qq = "%s /* %s */" % (q, stack) + else: + qq = q + old_mysql_do_query(self, qq) + + setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query) + + +class EngineFacade(object): + """A helper class for removing of global engine instances from rack.db. + + As a library, rack.db can't decide where to store/when to create engine + and sessionmaker instances, so this must be left for a target application. + + On the other hand, in order to simplify the adoption of rack.db changes, + we'll provide a helper class, which creates engine and sessionmaker + on its instantiation and provides get_engine()/get_session() methods + that are compatible with corresponding utility functions that currently + exist in target projects, e.g. in Nova. + + engine/sessionmaker instances will still be global (and they are meant to + be global), but they will be stored in the app context, rather that in the + rack.db context. + + Note: using of this helper is completely optional and you are encouraged to + integrate engine/sessionmaker instances into your apps any way you like + (e.g. 
one might want to bind a session to a request context). Two important + things to remember: + 1. An Engine instance is effectively a pool of DB connections, so it's + meant to be shared (and it's thread-safe). + 2. A Session instance is not meant to be shared and represents a DB + transactional context (i.e. it's not thread-safe). sessionmaker is + a factory of sessions. + + """ + + def __init__(self, sql_connection, + sqlite_fk=False, mysql_sql_mode=None, + autocommit=True, expire_on_commit=False, **kwargs): + """Initialize engine and sessionmaker instances. + + :param sqlite_fk: enable foreign keys in SQLite + :type sqlite_fk: bool + + :param mysql_sql_mode: set SQL mode in MySQL + :type mysql_sql_mode: string + + :param autocommit: use autocommit mode for created Session instances + :type autocommit: bool + + :param expire_on_commit: expire session objects on commit + :type expire_on_commit: bool + + Keyword arguments: + + :keyword idle_timeout: timeout before idle sql connections are reaped + (defaults to 3600) + :keyword connection_debug: verbosity of SQL debugging information. + 0=None, 100=Everything (defaults to 0) + :keyword max_pool_size: maximum number of SQL connections to keep open + in a pool (defaults to SQLAlchemy settings) + :keyword max_overflow: if set, use this value for max_overflow with + sqlalchemy (defaults to SQLAlchemy settings) + :keyword pool_timeout: if set, use this value for pool_timeout with + sqlalchemy (defaults to SQLAlchemy settings) + :keyword sqlite_synchronous: if True, SQLite uses synchronous mode + (defaults to True) + :keyword connection_trace: add python stack traces to SQL as comment + strings (defaults to False) + :keyword max_retries: maximum db connection retries during startup. + (setting -1 implies an infinite retry count) + (defaults to 10) + :keyword retry_interval: interval between retries of opening a sql + connection (defaults to 10) + + """ + + super(EngineFacade, self).__init__() + + self._engine = create_engine( + sql_connection=sql_connection, + sqlite_fk=sqlite_fk, + mysql_sql_mode=mysql_sql_mode, + idle_timeout=kwargs.get('idle_timeout', 3600), + connection_debug=kwargs.get('connection_debug', 0), + max_pool_size=kwargs.get('max_pool_size'), + max_overflow=kwargs.get('max_overflow'), + pool_timeout=kwargs.get('pool_timeout'), + sqlite_synchronous=kwargs.get('sqlite_synchronous', True), + connection_trace=kwargs.get('connection_trace', False), + max_retries=kwargs.get('max_retries', 10), + retry_interval=kwargs.get('retry_interval', 10)) + self._session_maker = get_maker( + engine=self._engine, + autocommit=autocommit, + expire_on_commit=expire_on_commit) + + def get_engine(self): + """Get the engine instance (note, that it's shared).""" + + return self._engine + + def get_session(self, **kwargs): + """Get a Session instance. + + If passed, keyword arguments values override the ones used when the + sessionmaker instance was created. 
+ + :keyword autocommit: use autocommit mode for created Session instances + :type autocommit: bool + + :keyword expire_on_commit: expire session objects on commit + :type expire_on_commit: bool + + """ + + for arg in kwargs: + if arg not in ('autocommit', 'expire_on_commit'): + del kwargs[arg] + + return self._session_maker(**kwargs) diff --git a/rack/openstack/common/db/sqlalchemy/test_base.py b/rack/openstack/common/db/sqlalchemy/test_base.py new file mode 100644 index 0000000..a129da4 --- /dev/null +++ b/rack/openstack/common/db/sqlalchemy/test_base.py @@ -0,0 +1,149 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc +import functools +import os + +import fixtures +import six + +from rack.openstack.common.db.sqlalchemy import session +from rack.openstack.common.db.sqlalchemy import utils +from rack.openstack.common import test + + +class DbFixture(fixtures.Fixture): + """Basic database fixture. + + Allows to run tests on various db backends, such as SQLite, MySQL and + PostgreSQL. By default use sqlite backend. To override default backend + uri set env variable OS_TEST_DBAPI_CONNECTION with database admin + credentials for specific backend. + """ + + def _get_uri(self): + return os.getenv('OS_TEST_DBAPI_CONNECTION', 'sqlite://') + + def __init__(self, test): + super(DbFixture, self).__init__() + + self.test = test + + def setUp(self): + super(DbFixture, self).setUp() + + self.test.engine = session.create_engine(self._get_uri()) + self.test.sessionmaker = session.get_maker(self.test.engine) + self.addCleanup(self.test.engine.dispose) + + +class DbTestCase(test.BaseTestCase): + """Base class for testing of DB code. + + Using `DbFixture`. Intended to be the main database test case to use all + the tests on a given backend with user defined uri. Backend specific + tests should be decorated with `backend_specific` decorator. + """ + + FIXTURE = DbFixture + + def setUp(self): + super(DbTestCase, self).setUp() + self.useFixture(self.FIXTURE(self)) + + +ALLOWED_DIALECTS = ['sqlite', 'mysql', 'postgresql'] + + +def backend_specific(*dialects): + """Decorator to skip backend specific tests on inappropriate engines. + + ::dialects: list of dialects names under which the test will be launched. + """ + def wrap(f): + @functools.wraps(f) + def ins_wrap(self): + if not set(dialects).issubset(ALLOWED_DIALECTS): + raise ValueError( + "Please use allowed dialects: %s" % ALLOWED_DIALECTS) + if self.engine.name not in dialects: + msg = ('The test "%s" can be run ' + 'only on %s. Current engine is %s.') + args = (f.__name__, ' '.join(dialects), self.engine.name) + self.skip(msg % args) + else: + return f(self) + return ins_wrap + return wrap + + +@six.add_metaclass(abc.ABCMeta) +class OpportunisticFixture(DbFixture): + """Base fixture to use default CI databases. + + The databases exist in OpenStack CI infrastructure. But for the + correct functioning in local environment the databases must be + created manually. 
+ """ + + DRIVER = abc.abstractproperty(lambda: None) + DBNAME = PASSWORD = USERNAME = 'openstack_citest' + + def _get_uri(self): + return utils.get_connect_string(backend=self.DRIVER, + user=self.USERNAME, + passwd=self.PASSWORD, + database=self.DBNAME) + + +@six.add_metaclass(abc.ABCMeta) +class OpportunisticTestCase(DbTestCase): + """Base test case to use default CI databases. + + The subclasses of the test case are running only when openstack_citest + database is available otherwise a tests will be skipped. + """ + + FIXTURE = abc.abstractproperty(lambda: None) + + def setUp(self): + credentials = { + 'backend': self.FIXTURE.DRIVER, + 'user': self.FIXTURE.USERNAME, + 'passwd': self.FIXTURE.PASSWORD, + 'database': self.FIXTURE.DBNAME} + + if self.FIXTURE.DRIVER and not utils.is_backend_avail(**credentials): + msg = '%s backend is not available.' % self.FIXTURE.DRIVER + return self.skip(msg) + + super(OpportunisticTestCase, self).setUp() + + +class MySQLOpportunisticFixture(OpportunisticFixture): + DRIVER = 'mysql' + + +class PostgreSQLOpportunisticFixture(OpportunisticFixture): + DRIVER = 'postgresql' + + +class MySQLOpportunisticTestCase(OpportunisticTestCase): + FIXTURE = MySQLOpportunisticFixture + + +class PostgreSQLOpportunisticTestCase(OpportunisticTestCase): + FIXTURE = PostgreSQLOpportunisticFixture diff --git a/rack/openstack/common/db/sqlalchemy/test_migrations.py b/rack/openstack/common/db/sqlalchemy/test_migrations.py new file mode 100644 index 0000000..cccacad --- /dev/null +++ b/rack/openstack/common/db/sqlalchemy/test_migrations.py @@ -0,0 +1,269 @@ +# Copyright 2010-2011 OpenStack Foundation +# Copyright 2012-2013 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import functools +import logging +import os +import subprocess + +import lockfile +from six import moves +from six.moves.urllib import parse +import sqlalchemy +import sqlalchemy.exc + +from rack.openstack.common.db.sqlalchemy import utils +from rack.openstack.common.gettextutils import _LE +from rack.openstack.common import test + +LOG = logging.getLogger(__name__) + + +def _have_mysql(user, passwd, database): + present = os.environ.get('TEST_MYSQL_PRESENT') + if present is None: + return utils.is_backend_avail(backend='mysql', + user=user, + passwd=passwd, + database=database) + return present.lower() in ('', 'true') + + +def _have_postgresql(user, passwd, database): + present = os.environ.get('TEST_POSTGRESQL_PRESENT') + if present is None: + return utils.is_backend_avail(backend='postgres', + user=user, + passwd=passwd, + database=database) + return present.lower() in ('', 'true') + + +def _set_db_lock(lock_path=None, lock_prefix=None): + def decorator(f): + @functools.wraps(f) + def wrapper(*args, **kwargs): + try: + path = lock_path or os.environ.get("NOVA_LOCK_PATH") + lock = lockfile.FileLock(os.path.join(path, lock_prefix)) + with lock: + LOG.debug('Got lock "%s"' % f.__name__) + return f(*args, **kwargs) + finally: + LOG.debug('Lock released "%s"' % f.__name__) + return wrapper + return decorator + + +class BaseMigrationTestCase(test.BaseTestCase): + """Base class fort testing of migration utils.""" + + def __init__(self, *args, **kwargs): + super(BaseMigrationTestCase, self).__init__(*args, **kwargs) + + self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__), + 'test_migrations.conf') + # Test machines can set the TEST_MIGRATIONS_CONF variable + # to override the location of the config file for migration testing + self.CONFIG_FILE_PATH = os.environ.get('TEST_MIGRATIONS_CONF', + self.DEFAULT_CONFIG_FILE) + self.test_databases = {} + self.migration_api = None + + def setUp(self): + super(BaseMigrationTestCase, self).setUp() + + # Load test databases from the config file. Only do this + # once. No need to re-run this on each test... + LOG.debug('config_path is %s' % self.CONFIG_FILE_PATH) + if os.path.exists(self.CONFIG_FILE_PATH): + cp = moves.configparser.RawConfigParser() + try: + cp.read(self.CONFIG_FILE_PATH) + defaults = cp.defaults() + for key, value in defaults.items(): + self.test_databases[key] = value + except moves.configparser.ParsingError as e: + self.fail("Failed to read test_migrations.conf config " + "file. Got error: %s" % e) + else: + self.fail("Failed to find test_migrations.conf config " + "file.") + + self.engines = {} + for key, value in self.test_databases.items(): + self.engines[key] = sqlalchemy.create_engine(value) + + # We start each test case with a completely blank slate. 
+ self._reset_databases() + + def tearDown(self): + # We destroy the test data store between each test case, + # and recreate it, which ensures that we have no side-effects + # from the tests + self._reset_databases() + super(BaseMigrationTestCase, self).tearDown() + + def execute_cmd(self, cmd=None): + process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + output = process.communicate()[0] + LOG.debug(output) + self.assertEqual(0, process.returncode, + "Failed to run: %s\n%s" % (cmd, output)) + + def _reset_pg(self, conn_pieces): + (user, + password, + database, + host) = utils.get_db_connection_info(conn_pieces) + os.environ['PGPASSWORD'] = password + os.environ['PGUSER'] = user + # note(boris-42): We must create and drop database, we can't + # drop database which we have connected to, so for such + # operations there is a special database template1. + sqlcmd = ("psql -w -U %(user)s -h %(host)s -c" + " '%(sql)s' -d template1") + + sql = ("drop database if exists %s;") % database + droptable = sqlcmd % {'user': user, 'host': host, 'sql': sql} + self.execute_cmd(droptable) + + sql = ("create database %s;") % database + createtable = sqlcmd % {'user': user, 'host': host, 'sql': sql} + self.execute_cmd(createtable) + + os.unsetenv('PGPASSWORD') + os.unsetenv('PGUSER') + + @_set_db_lock(lock_prefix='migration_tests-') + def _reset_databases(self): + for key, engine in self.engines.items(): + conn_string = self.test_databases[key] + conn_pieces = parse.urlparse(conn_string) + engine.dispose() + if conn_string.startswith('sqlite'): + # We can just delete the SQLite database, which is + # the easiest and cleanest solution + db_path = conn_pieces.path.strip('/') + if os.path.exists(db_path): + os.unlink(db_path) + # No need to recreate the SQLite DB. SQLite will + # create it for us if it's not there... + elif conn_string.startswith('mysql'): + # We can execute the MySQL client to destroy and re-create + # the MYSQL database, which is easier and less error-prone + # than using SQLAlchemy to do this via MetaData...trust me. + (user, password, database, host) = \ + utils.get_db_connection_info(conn_pieces) + sql = ("drop database if exists %(db)s; " + "create database %(db)s;") % {'db': database} + cmd = ("mysql -u \"%(user)s\" -p\"%(password)s\" -h %(host)s " + "-e \"%(sql)s\"") % {'user': user, 'password': password, + 'host': host, 'sql': sql} + self.execute_cmd(cmd) + elif conn_string.startswith('postgresql'): + self._reset_pg(conn_pieces) + + +class WalkVersionsMixin(object): + def _walk_versions(self, engine=None, snake_walk=False, downgrade=True): + # Determine latest version script from the repo, then + # upgrade from 1 through to the latest, with no data + # in the databases. This just checks that the schema itself + # upgrades successfully. + + # Place the database under version control + self.migration_api.version_control(engine, self.REPOSITORY, + self.INIT_VERSION) + self.assertEqual(self.INIT_VERSION, + self.migration_api.db_version(engine, + self.REPOSITORY)) + + LOG.debug('latest version is %s' % self.REPOSITORY.latest) + versions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1) + + for version in versions: + # upgrade -> downgrade -> upgrade + self._migrate_up(engine, version, with_data=True) + if snake_walk: + downgraded = self._migrate_down( + engine, version - 1, with_data=True) + if downgraded: + self._migrate_up(engine, version) + + if downgrade: + # Now walk it back down to 0 from the latest, testing + # the downgrade paths. 
+ for version in reversed(versions): + # downgrade -> upgrade -> downgrade + downgraded = self._migrate_down(engine, version - 1) + + if snake_walk and downgraded: + self._migrate_up(engine, version) + self._migrate_down(engine, version - 1) + + def _migrate_down(self, engine, version, with_data=False): + try: + self.migration_api.downgrade(engine, self.REPOSITORY, version) + except NotImplementedError: + # NOTE(sirp): some migrations, namely release-level + # migrations, don't support a downgrade. + return False + + self.assertEqual( + version, self.migration_api.db_version(engine, self.REPOSITORY)) + + # NOTE(sirp): `version` is what we're downgrading to (i.e. the 'target' + # version). So if we have any downgrade checks, they need to be run for + # the previous (higher numbered) migration. + if with_data: + post_downgrade = getattr( + self, "_post_downgrade_%03d" % (version + 1), None) + if post_downgrade: + post_downgrade(engine) + + return True + + def _migrate_up(self, engine, version, with_data=False): + """migrate up to a new version of the db. + + We allow for data insertion and post checks at every + migration version with special _pre_upgrade_### and + _check_### functions in the main test. + """ + # NOTE(sdague): try block is here because it's impossible to debug + # where a failed data migration happens otherwise + try: + if with_data: + data = None + pre_upgrade = getattr( + self, "_pre_upgrade_%03d" % version, None) + if pre_upgrade: + data = pre_upgrade(engine) + + self.migration_api.upgrade(engine, self.REPOSITORY, version) + self.assertEqual(version, + self.migration_api.db_version(engine, + self.REPOSITORY)) + if with_data: + check = getattr(self, "_check_%03d" % version, None) + if check: + check(engine, data) + except Exception: + LOG.error(_LE("Failed to migrate to version %s on engine %s") % + (version, engine)) + raise diff --git a/rack/openstack/common/db/sqlalchemy/utils.py b/rack/openstack/common/db/sqlalchemy/utils.py new file mode 100644 index 0000000..0561ee3 --- /dev/null +++ b/rack/openstack/common/db/sqlalchemy/utils.py @@ -0,0 +1,638 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2010-2011 OpenStack Foundation. +# Copyright 2012 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
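# --- Editorial usage sketch (not part of this patch) ---------------------
# WalkVersionsMixin above leaves REPOSITORY, INIT_VERSION and migration_api
# to the concrete test case. A minimal, hypothetical wiring for RACK's own
# migrate repository (rack/db/sqlalchemy/migrate_repo) might look like the
# following; the class name and exact repository path are assumptions, not
# code taken from this patch.

from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository

from rack.openstack.common.db.sqlalchemy import test_migrations


class RackMigrationsWalk(test_migrations.BaseMigrationTestCase,
                         test_migrations.WalkVersionsMixin):

    def setUp(self):
        super(RackMigrationsWalk, self).setUp()
        self.INIT_VERSION = 0
        self.REPOSITORY = Repository('rack/db/sqlalchemy/migrate_repo')
        self.migration_api = versioning_api

    def test_walk_versions(self):
        # Upgrade each configured engine one revision at a time up to the
        # latest version, then walk the downgrades back to INIT_VERSION.
        for engine in self.engines.values():
            self._walk_versions(engine, snake_walk=False, downgrade=True)
# -------------------------------------------------------------------------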
+ +import logging +import re + +from migrate.changeset import UniqueConstraint +import sqlalchemy +from sqlalchemy import Boolean +from sqlalchemy import CheckConstraint +from sqlalchemy import Column +from sqlalchemy.engine import reflection +from sqlalchemy.ext.compiler import compiles +from sqlalchemy import func +from sqlalchemy import Index +from sqlalchemy import Integer +from sqlalchemy import MetaData +from sqlalchemy import or_ +from sqlalchemy.sql.expression import literal_column +from sqlalchemy.sql.expression import UpdateBase +from sqlalchemy.sql import select +from sqlalchemy import String +from sqlalchemy import Table +from sqlalchemy.types import NullType + +from rack.openstack.common import context as request_context +from rack.openstack.common.db.sqlalchemy import models +from rack.openstack.common.gettextutils import _, _LI, _LW +from rack.openstack.common import timeutils + + +LOG = logging.getLogger(__name__) + +_DBURL_REGEX = re.compile(r"[^:]+://([^:]+):([^@]+)@.+") + + +def sanitize_db_url(url): + match = _DBURL_REGEX.match(url) + if match: + return '%s****:****%s' % (url[:match.start(1)], url[match.end(2):]) + return url + + +class InvalidSortKey(Exception): + message = _("Sort key supplied was not valid.") + + +# copy from glance/db/sqlalchemy/api.py +def paginate_query(query, model, limit, sort_keys, marker=None, + sort_dir=None, sort_dirs=None): + """Returns a query with sorting / pagination criteria added. + + Pagination works by requiring a unique sort_key, specified by sort_keys. + (If sort_keys is not unique, then we risk looping through values.) + We use the last row in the previous page as the 'marker' for pagination. + So we must return values that follow the passed marker in the order. + With a single-valued sort_key, this would be easy: sort_key > X. + With a compound-values sort_key, (k1, k2, k3) we must do this to repeat + the lexicographical ordering: + (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3) + + We also have to cope with different sort_directions. + + Typically, the id of the last row is used as the client-facing pagination + marker, then the actual marker object must be fetched from the db and + passed in to us as marker. + + :param query: the query object to which we should add paging/sorting + :param model: the ORM model class + :param limit: maximum number of items to return + :param sort_keys: array of attributes by which results should be sorted + :param marker: the last item of the previous page; we returns the next + results after this value. + :param sort_dir: direction in which results should be sorted (asc, desc) + :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys + + :rtype: sqlalchemy.orm.query.Query + :return: The query with sorting/pagination added. 
+ """ + + if 'id' not in sort_keys: + # TODO(justinsb): If this ever gives a false-positive, check + # the actual primary key, rather than assuming its id + LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?')) + + assert(not (sort_dir and sort_dirs)) + + # Default the sort direction to ascending + if sort_dirs is None and sort_dir is None: + sort_dir = 'asc' + + # Ensure a per-column sort direction + if sort_dirs is None: + sort_dirs = [sort_dir for _sort_key in sort_keys] + + assert(len(sort_dirs) == len(sort_keys)) + + # Add sorting + for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs): + try: + sort_dir_func = { + 'asc': sqlalchemy.asc, + 'desc': sqlalchemy.desc, + }[current_sort_dir] + except KeyError: + raise ValueError(_("Unknown sort direction, " + "must be 'desc' or 'asc'")) + try: + sort_key_attr = getattr(model, current_sort_key) + except AttributeError: + raise InvalidSortKey() + query = query.order_by(sort_dir_func(sort_key_attr)) + + # Add pagination + if marker is not None: + marker_values = [] + for sort_key in sort_keys: + v = getattr(marker, sort_key) + marker_values.append(v) + + # Build up an array of sort criteria as in the docstring + criteria_list = [] + for i in range(len(sort_keys)): + crit_attrs = [] + for j in range(i): + model_attr = getattr(model, sort_keys[j]) + crit_attrs.append((model_attr == marker_values[j])) + + model_attr = getattr(model, sort_keys[i]) + if sort_dirs[i] == 'desc': + crit_attrs.append((model_attr < marker_values[i])) + else: + crit_attrs.append((model_attr > marker_values[i])) + + criteria = sqlalchemy.sql.and_(*crit_attrs) + criteria_list.append(criteria) + + f = sqlalchemy.sql.or_(*criteria_list) + query = query.filter(f) + + if limit is not None: + query = query.limit(limit) + + return query + + +def _read_deleted_filter(query, db_model, read_deleted): + if 'deleted' not in db_model.__table__.columns: + raise ValueError(_("There is no `deleted` column in `%s` table. " + "Project doesn't use soft-deleted feature.") + % db_model.__name__) + + default_deleted_value = db_model.__table__.c.deleted.default.arg + if read_deleted == 'no': + query = query.filter(db_model.deleted == default_deleted_value) + elif read_deleted == 'yes': + pass # omit the filter to include deleted and active + elif read_deleted == 'only': + query = query.filter(db_model.deleted != default_deleted_value) + else: + raise ValueError(_("Unrecognized read_deleted value '%s'") + % read_deleted) + return query + + +def _project_filter(query, db_model, context, project_only): + if project_only and 'project_id' not in db_model.__table__.columns: + raise ValueError(_("There is no `project_id` column in `%s` table.") + % db_model.__name__) + + if request_context.is_user_context(context) and project_only: + if project_only == 'allow_none': + is_none = None + query = query.filter(or_(db_model.project_id == context.project_id, + db_model.project_id == is_none)) + else: + query = query.filter(db_model.project_id == context.project_id) + + return query + + +def model_query(context, model, session, args=None, project_only=False, + read_deleted=None): + """Query helper that accounts for context's `read_deleted` field. + + :param context: context to query under + + :param model: Model to query. Must be a subclass of ModelBase. + :type model: models.ModelBase + + :param session: The session to use. + :type session: sqlalchemy.orm.session.Session + + :param args: Arguments to query. If None - model is used. 
+ :type args: tuple + + :param project_only: If present and context is user-type, then restrict + query to match the context's project_id. If set to + 'allow_none', restriction includes project_id = None. + :type project_only: bool + + :param read_deleted: If present, overrides context's read_deleted field. + :type read_deleted: bool + + Usage: + result = (utils.model_query(context, models.Instance, session=session) + .filter_by(uuid=instance_uuid) + .all()) + + query = utils.model_query( + context, Node, + session=session, + args=(func.count(Node.id), func.sum(Node.ram)) + ).filter_by(project_id=project_id) + """ + + if not read_deleted: + if hasattr(context, 'read_deleted'): + # NOTE(viktors): some projects use `read_deleted` attribute in + # their contexts instead of `show_deleted`. + read_deleted = context.read_deleted + else: + read_deleted = context.show_deleted + + if not issubclass(model, models.ModelBase): + raise TypeError(_("model should be a subclass of ModelBase")) + + query = session.query(model) if not args else session.query(*args) + query = _read_deleted_filter(query, model, read_deleted) + query = _project_filter(query, model, context, project_only) + + return query + + +def get_table(engine, name): + """Returns an sqlalchemy table dynamically from db. + + Needed because the models don't work for us in migrations + as models will be far out of sync with the current data. + """ + metadata = MetaData() + metadata.bind = engine + return Table(name, metadata, autoload=True) + + +class InsertFromSelect(UpdateBase): + """Form the base for `INSERT INTO table (SELECT ... )` statement.""" + def __init__(self, table, select): + self.table = table + self.select = select + + +@compiles(InsertFromSelect) +def visit_insert_from_select(element, compiler, **kw): + """Form the `INSERT INTO table (SELECT ... )` statement.""" + return "INSERT INTO %s %s" % ( + compiler.process(element.table, asfrom=True), + compiler.process(element.select)) + + +class ColumnError(Exception): + """Error raised when no column or an invalid column is found.""" + + +def _get_not_supported_column(col_name_col_instance, column_name): + try: + column = col_name_col_instance[column_name] + except KeyError: + msg = _("Please specify column %s in col_name_col_instance " + "param. It is required because column has unsupported " + "type by sqlite).") + raise ColumnError(msg % column_name) + + if not isinstance(column, Column): + msg = _("col_name_col_instance param has wrong type of " + "column instance for column %s It should be instance " + "of sqlalchemy.Column.") + raise ColumnError(msg % column_name) + return column + + +def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns, + **col_name_col_instance): + """Drop unique constraint from table. + + This method drops UC from table and works for mysql, postgresql and sqlite. + In mysql and postgresql we are able to use "alter table" construction. + Sqlalchemy doesn't support some sqlite column types and replaces their + type with NullType in metadata. We process these columns and replace + NullType with the correct column type. + + :param migrate_engine: sqlalchemy engine + :param table_name: name of table that contains uniq constraint. + :param uc_name: name of uniq constraint that will be dropped. + :param columns: columns that are in uniq constraint. + :param col_name_col_instance: contains pair column_name=column_instance. + column_instance is instance of Column. 
These params + are required only for columns that have unsupported + types by sqlite. For example BigInteger. + """ + + meta = MetaData() + meta.bind = migrate_engine + t = Table(table_name, meta, autoload=True) + + if migrate_engine.name == "sqlite": + override_cols = [ + _get_not_supported_column(col_name_col_instance, col.name) + for col in t.columns + if isinstance(col.type, NullType) + ] + for col in override_cols: + t.columns.replace(col) + + uc = UniqueConstraint(*columns, table=t, name=uc_name) + uc.drop() + + +def drop_old_duplicate_entries_from_table(migrate_engine, table_name, + use_soft_delete, *uc_column_names): + """Drop all old rows having the same values for columns in uc_columns. + + This method drop (or mark ad `deleted` if use_soft_delete is True) old + duplicate rows form table with name `table_name`. + + :param migrate_engine: Sqlalchemy engine + :param table_name: Table with duplicates + :param use_soft_delete: If True - values will be marked as `deleted`, + if False - values will be removed from table + :param uc_column_names: Unique constraint columns + """ + meta = MetaData() + meta.bind = migrate_engine + + table = Table(table_name, meta, autoload=True) + columns_for_group_by = [table.c[name] for name in uc_column_names] + + columns_for_select = [func.max(table.c.id)] + columns_for_select.extend(columns_for_group_by) + + duplicated_rows_select = select(columns_for_select, + group_by=columns_for_group_by, + having=func.count(table.c.id) > 1) + + for row in migrate_engine.execute(duplicated_rows_select): + # NOTE(boris-42): Do not remove row that has the biggest ID. + delete_condition = table.c.id != row[0] + is_none = None # workaround for pyflakes + delete_condition &= table.c.deleted_at == is_none + for name in uc_column_names: + delete_condition &= table.c[name] == row[name] + + rows_to_delete_select = select([table.c.id]).where(delete_condition) + for row in migrate_engine.execute(rows_to_delete_select).fetchall(): + LOG.info(_LI("Deleting duplicated row with id: %(id)s from table: " + "%(table)s") % dict(id=row[0], table=table_name)) + + if use_soft_delete: + delete_statement = table.update().\ + where(delete_condition).\ + values({ + 'deleted': literal_column('id'), + 'updated_at': literal_column('updated_at'), + 'deleted_at': timeutils.utcnow() + }) + else: + delete_statement = table.delete().where(delete_condition) + migrate_engine.execute(delete_statement) + + +def _get_default_deleted_value(table): + if isinstance(table.c.id.type, Integer): + return 0 + if isinstance(table.c.id.type, String): + return "" + raise ColumnError(_("Unsupported id columns type")) + + +def _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes): + table = get_table(migrate_engine, table_name) + + insp = reflection.Inspector.from_engine(migrate_engine) + real_indexes = insp.get_indexes(table_name) + existing_index_names = dict( + [(index['name'], index['column_names']) for index in real_indexes]) + + # NOTE(boris-42): Restore indexes on `deleted` column + for index in indexes: + if 'deleted' not in index['column_names']: + continue + name = index['name'] + if name in existing_index_names: + column_names = [table.c[c] for c in existing_index_names[name]] + old_index = Index(name, *column_names, unique=index["unique"]) + old_index.drop(migrate_engine) + + column_names = [table.c[c] for c in index['column_names']] + new_index = Index(index["name"], *column_names, unique=index["unique"]) + new_index.create(migrate_engine) + + +def 
change_deleted_column_type_to_boolean(migrate_engine, table_name, + **col_name_col_instance): + if migrate_engine.name == "sqlite": + return _change_deleted_column_type_to_boolean_sqlite( + migrate_engine, table_name, **col_name_col_instance) + insp = reflection.Inspector.from_engine(migrate_engine) + indexes = insp.get_indexes(table_name) + + table = get_table(migrate_engine, table_name) + + old_deleted = Column('old_deleted', Boolean, default=False) + old_deleted.create(table, populate_default=False) + + table.update().\ + where(table.c.deleted == table.c.id).\ + values(old_deleted=True).\ + execute() + + table.c.deleted.drop() + table.c.old_deleted.alter(name="deleted") + + _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes) + + +def _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name, + **col_name_col_instance): + insp = reflection.Inspector.from_engine(migrate_engine) + table = get_table(migrate_engine, table_name) + + columns = [] + for column in table.columns: + column_copy = None + if column.name != "deleted": + if isinstance(column.type, NullType): + column_copy = _get_not_supported_column(col_name_col_instance, + column.name) + else: + column_copy = column.copy() + else: + column_copy = Column('deleted', Boolean, default=0) + columns.append(column_copy) + + constraints = [constraint.copy() for constraint in table.constraints] + + meta = table.metadata + new_table = Table(table_name + "__tmp__", meta, + *(columns + constraints)) + new_table.create() + + indexes = [] + for index in insp.get_indexes(table_name): + column_names = [new_table.c[c] for c in index['column_names']] + indexes.append(Index(index["name"], *column_names, + unique=index["unique"])) + + c_select = [] + for c in table.c: + if c.name != "deleted": + c_select.append(c) + else: + c_select.append(table.c.deleted == table.c.id) + + ins = InsertFromSelect(new_table, select(c_select)) + migrate_engine.execute(ins) + + table.drop() + [index.create(migrate_engine) for index in indexes] + + new_table.rename(table_name) + new_table.update().\ + where(new_table.c.deleted == new_table.c.id).\ + values(deleted=True).\ + execute() + + +def change_deleted_column_type_to_id_type(migrate_engine, table_name, + **col_name_col_instance): + if migrate_engine.name == "sqlite": + return _change_deleted_column_type_to_id_type_sqlite( + migrate_engine, table_name, **col_name_col_instance) + insp = reflection.Inspector.from_engine(migrate_engine) + indexes = insp.get_indexes(table_name) + + table = get_table(migrate_engine, table_name) + + new_deleted = Column('new_deleted', table.c.id.type, + default=_get_default_deleted_value(table)) + new_deleted.create(table, populate_default=True) + + deleted = True # workaround for pyflakes + table.update().\ + where(table.c.deleted == deleted).\ + values(new_deleted=table.c.id).\ + execute() + table.c.deleted.drop() + table.c.new_deleted.alter(name="deleted") + + _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes) + + +def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name, + **col_name_col_instance): + # NOTE(boris-42): sqlaclhemy-migrate can't drop column with check + # constraints in sqlite DB and our `deleted` column has + # 2 check constraints. So there is only one way to remove + # these constraints: + # 1) Create new table with the same columns, constraints + # and indexes. (except deleted column). + # 2) Copy all data from old to new table. + # 3) Drop old table. + # 4) Rename new table to old table name. 
+ insp = reflection.Inspector.from_engine(migrate_engine) + meta = MetaData(bind=migrate_engine) + table = Table(table_name, meta, autoload=True) + default_deleted_value = _get_default_deleted_value(table) + + columns = [] + for column in table.columns: + column_copy = None + if column.name != "deleted": + if isinstance(column.type, NullType): + column_copy = _get_not_supported_column(col_name_col_instance, + column.name) + else: + column_copy = column.copy() + else: + column_copy = Column('deleted', table.c.id.type, + default=default_deleted_value) + columns.append(column_copy) + + def is_deleted_column_constraint(constraint): + # NOTE(boris-42): There is no other way to check is CheckConstraint + # associated with deleted column. + if not isinstance(constraint, CheckConstraint): + return False + sqltext = str(constraint.sqltext) + return (sqltext.endswith("deleted in (0, 1)") or + sqltext.endswith("deleted IN (:deleted_1, :deleted_2)")) + + constraints = [] + for constraint in table.constraints: + if not is_deleted_column_constraint(constraint): + constraints.append(constraint.copy()) + + new_table = Table(table_name + "__tmp__", meta, + *(columns + constraints)) + new_table.create() + + indexes = [] + for index in insp.get_indexes(table_name): + column_names = [new_table.c[c] for c in index['column_names']] + indexes.append(Index(index["name"], *column_names, + unique=index["unique"])) + + ins = InsertFromSelect(new_table, table.select()) + migrate_engine.execute(ins) + + table.drop() + [index.create(migrate_engine) for index in indexes] + + new_table.rename(table_name) + deleted = True # workaround for pyflakes + new_table.update().\ + where(new_table.c.deleted == deleted).\ + values(deleted=new_table.c.id).\ + execute() + + # NOTE(boris-42): Fix value of deleted column: False -> "" or 0. + deleted = False # workaround for pyflakes + new_table.update().\ + where(new_table.c.deleted == deleted).\ + values(deleted=default_deleted_value).\ + execute() + + +def get_connect_string(backend, database, user=None, passwd=None): + """Get database connection + + Try to get a connection with a very specific set of values, if we get + these then we'll run the tests, otherwise they are skipped + """ + args = {'backend': backend, + 'user': user, + 'passwd': passwd, + 'database': database} + if backend == 'sqlite': + template = '%(backend)s:///%(database)s' + else: + template = "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" + return template % args + + +def is_backend_avail(backend, database, user=None, passwd=None): + try: + connect_uri = get_connect_string(backend=backend, + database=database, + user=user, + passwd=passwd) + engine = sqlalchemy.create_engine(connect_uri) + connection = engine.connect() + except Exception: + # intentionally catch all to handle exceptions even if we don't + # have any backend code loaded. 
+ return False + else: + connection.close() + engine.dispose() + return True + + +def get_db_connection_info(conn_pieces): + database = conn_pieces.path.strip('/') + loc_pieces = conn_pieces.netloc.split('@') + host = loc_pieces[1] + + auth_pieces = loc_pieces[0].split(':') + user = auth_pieces[0] + password = "" + if len(auth_pieces) > 1: + password = auth_pieces[1].strip() + + return (user, password, database, host) diff --git a/rack/openstack/common/eventlet_backdoor.py b/rack/openstack/common/eventlet_backdoor.py new file mode 100644 index 0000000..e1aad43 --- /dev/null +++ b/rack/openstack/common/eventlet_backdoor.py @@ -0,0 +1,146 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 OpenStack Foundation. +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from __future__ import print_function + +import errno +import gc +import os +import pprint +import socket +import sys +import traceback + +import eventlet +import eventlet.backdoor +import greenlet +from oslo.config import cfg + +from rack.openstack.common.gettextutils import _ +from rack.openstack.common import log as logging + +help_for_backdoor_port = 'Acceptable ' + \ + 'values are 0, and :, where 0 results in ' + \ + 'listening on a random tcp port number, results in ' + \ + 'listening on the specified port number and not enabling backdoor' + \ + 'if it is in use and : results in listening on the ' + \ + 'smallest unused port number within the specified range of port ' + \ + 'numbers. The chosen port is displayed in the service\'s log file.' +eventlet_backdoor_opts = [ + cfg.StrOpt('backdoor_port', + default=None, + help='Enable eventlet backdoor. %s' % help_for_backdoor_port) +] + +CONF = cfg.CONF +CONF.register_opts(eventlet_backdoor_opts) +LOG = logging.getLogger(__name__) + + +class EventletBackdoorConfigValueError(Exception): + def __init__(self, port_range, help_msg, ex): + msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. 
' + '%(help)s' % + {'range': port_range, 'ex': ex, 'help': help_msg}) + super(EventletBackdoorConfigValueError, self).__init__(msg) + self.port_range = port_range + + +def _dont_use_this(): + print("Don't use this, just disconnect instead") + + +def _find_objects(t): + return filter(lambda o: isinstance(o, t), gc.get_objects()) + + +def _print_greenthreads(): + for i, gt in enumerate(_find_objects(greenlet.greenlet)): + print(i, gt) + traceback.print_stack(gt.gr_frame) + print() + + +def _print_nativethreads(): + for threadId, stack in sys._current_frames().items(): + print(threadId) + traceback.print_stack(stack) + print() + + +def _parse_port_range(port_range): + if ':' not in port_range: + start, end = port_range, port_range + else: + start, end = port_range.split(':', 1) + try: + start, end = int(start), int(end) + if end < start: + raise ValueError + return start, end + except ValueError as ex: + raise EventletBackdoorConfigValueError(port_range, ex, + help_for_backdoor_port) + + +def _listen(host, start_port, end_port, listen_func): + try_port = start_port + while True: + try: + return listen_func((host, try_port)) + except socket.error as exc: + if (exc.errno != errno.EADDRINUSE or + try_port >= end_port): + raise + try_port += 1 + + +def initialize_if_enabled(): + backdoor_locals = { + 'exit': _dont_use_this, # So we don't exit the entire process + 'quit': _dont_use_this, # So we don't exit the entire process + 'fo': _find_objects, + 'pgt': _print_greenthreads, + 'pnt': _print_nativethreads, + } + + if CONF.backdoor_port is None: + return None + + start_port, end_port = _parse_port_range(str(CONF.backdoor_port)) + + # NOTE(johannes): The standard sys.displayhook will print the value of + # the last expression and set it to __builtin__._, which overwrites + # the __builtin__._ that gettext sets. Let's switch to using pprint + # since it won't interact poorly with gettext, and it's easier to + # read the output too. + def displayhook(val): + if val is not None: + pprint.pprint(val) + sys.displayhook = displayhook + + sock = _listen('localhost', start_port, end_port, eventlet.listen) + + # In the case of backdoor port being zero, a port number is assigned by + # listen(). In any case, pull the port number out here. + port = sock.getsockname()[1] + LOG.info(_('Eventlet backdoor listening on %(port)s for process %(pid)d') % + {'port': port, 'pid': os.getpid()}) + eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock, + locals=backdoor_locals) + return port diff --git a/rack/openstack/common/excutils.py b/rack/openstack/common/excutils.py new file mode 100644 index 0000000..3b73dc0 --- /dev/null +++ b/rack/openstack/common/excutils.py @@ -0,0 +1,99 @@ +# Copyright 2011 OpenStack Foundation. +# Copyright 2012, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Exception related utilities. 
+""" + +import logging +import sys +import time +import traceback + +import six + +from rack.openstack.common.gettextutils import _ # noqa + + +class save_and_reraise_exception(object): + """Save current exception, run some code and then re-raise. + + In some cases the exception context can be cleared, resulting in None + being attempted to be re-raised after an exception handler is run. This + can happen when eventlet switches greenthreads or when running an + exception handler, code raises and catches an exception. In both + cases the exception context will be cleared. + + To work around this, we save the exception state, run handler code, and + then re-raise the original exception. If another exception occurs, the + saved exception is logged and the new exception is re-raised. + + In some cases the caller may not want to re-raise the exception, and + for those circumstances this context provides a reraise flag that + can be used to suppress the exception. For example: + + except Exception: + with save_and_reraise_exception() as ctxt: + decide_if_need_reraise() + if not should_be_reraised: + ctxt.reraise = False + """ + def __init__(self): + self.reraise = True + + def __enter__(self): + self.type_, self.value, self.tb, = sys.exc_info() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if exc_type is not None: + logging.error(_('Original exception being dropped: %s'), + traceback.format_exception(self.type_, + self.value, + self.tb)) + return False + if self.reraise: + six.reraise(self.type_, self.value, self.tb) + + +def forever_retry_uncaught_exceptions(infunc): + def inner_func(*args, **kwargs): + last_log_time = 0 + last_exc_message = None + exc_count = 0 + while True: + try: + return infunc(*args, **kwargs) + except Exception as exc: + this_exc_message = six.u(str(exc)) + if this_exc_message == last_exc_message: + exc_count += 1 + else: + exc_count = 1 + # Do not log any more frequently than once a minute unless + # the exception message changes + cur_time = int(time.time()) + if (cur_time - last_log_time > 60 or + this_exc_message != last_exc_message): + logging.exception( + _('Unexpected exception occurred %d time(s)... ' + 'retrying.') % exc_count) + last_log_time = cur_time + last_exc_message = this_exc_message + exc_count = 0 + # This should be a very rare event. In case it isn't, do + # a sleep. + time.sleep(1) + return inner_func diff --git a/rack/openstack/common/fileutils.py b/rack/openstack/common/fileutils.py new file mode 100644 index 0000000..8f539e0 --- /dev/null +++ b/rack/openstack/common/fileutils.py @@ -0,0 +1,137 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +import contextlib +import errno +import os +import tempfile + +from rack.openstack.common import excutils +from rack.openstack.common.gettextutils import _ # noqa +from rack.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + +_FILE_CACHE = {} + + +def ensure_tree(path): + """Create a directory (and any ancestor directories required) + + :param path: Directory to create + """ + try: + os.makedirs(path) + except OSError as exc: + if exc.errno == errno.EEXIST: + if not os.path.isdir(path): + raise + else: + raise + + +def read_cached_file(filename, force_reload=False): + """Read from a file if it has been modified. + + :param force_reload: Whether to reload the file. + :returns: A tuple with a boolean specifying if the data is fresh + or not. + """ + global _FILE_CACHE + + if force_reload and filename in _FILE_CACHE: + del _FILE_CACHE[filename] + + reloaded = False + mtime = os.path.getmtime(filename) + cache_info = _FILE_CACHE.setdefault(filename, {}) + + if not cache_info or mtime > cache_info.get('mtime', 0): + LOG.debug(_("Reloading cached file %s") % filename) + with open(filename) as fap: + cache_info['data'] = fap.read() + cache_info['mtime'] = mtime + reloaded = True + return (reloaded, cache_info['data']) + + +def delete_if_exists(path, remove=os.unlink): + """Delete a file, but ignore file not found error. + + :param path: File to delete + :param remove: Optional function to remove passed path + """ + + try: + remove(path) + except OSError as e: + if e.errno != errno.ENOENT: + raise + + +@contextlib.contextmanager +def remove_path_on_error(path, remove=delete_if_exists): + """Protect code that wants to operate on PATH atomically. + Any exception will cause PATH to be removed. + + :param path: File to work with + :param remove: Optional function to remove passed path + """ + + try: + yield + except Exception: + with excutils.save_and_reraise_exception(): + remove(path) + + +def file_open(*args, **kwargs): + """Open file + + see built-in file() documentation for more details + + Note: The reason this is kept in a separate module is to easily + be able to provide a stub module that doesn't alter system + state at all (for unit tests) + """ + return file(*args, **kwargs) + + +def write_to_tempfile(content, path=None, suffix='', prefix='tmp'): + """Create temporary file or use existing file. + + This util is needed for creating temporary file with + specified content, suffix and prefix. If path is not None, + it will be used for writing content. If the path doesn't + exist it'll be created. + + :param content: content for temporary file. + :param path: same as parameter 'dir' for mkstemp + :param suffix: same as parameter 'suffix' for mkstemp + :param prefix: same as parameter 'prefix' for mkstemp + + For example: it can be used in database tests for creating + configuration files. + """ + if path: + ensure_tree(path) + + (fd, path) = tempfile.mkstemp(suffix=suffix, dir=path, prefix=prefix) + try: + os.write(fd, content) + finally: + os.close(fd) + return path diff --git a/rack/openstack/common/fixture/__init__.py b/rack/openstack/common/fixture/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/rack/openstack/common/fixture/config.py b/rack/openstack/common/fixture/config.py new file mode 100644 index 0000000..9489b85 --- /dev/null +++ b/rack/openstack/common/fixture/config.py @@ -0,0 +1,85 @@ +# +# Copyright 2013 Mirantis, Inc. +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. 
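As a usage illustration for the fileutils helpers above, the short sketch below stages a temporary file and removes it again if later processing fails. The directory path is a placeholder, not a path used by RACK.

    from rack.openstack.common import fileutils


    def stage_config(content):
        # write_to_tempfile() creates the target directory via ensure_tree()
        # when a path is given and returns the generated file name.
        path = fileutils.write_to_tempfile(
            content, path='/tmp/rack-example', suffix='.conf')
        # If anything inside the block raises, the staged file is deleted.
        with fileutils.remove_path_on_error(path):
            reloaded, data = fileutils.read_cached_file(path)
            return path, data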
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import fixtures +from oslo.config import cfg +import six + + +class Config(fixtures.Fixture): + """Allows overriding configuration settings for the test. + + `conf` will be reset on cleanup. + + """ + + def __init__(self, conf=cfg.CONF): + self.conf = conf + + def setUp(self): + super(Config, self).setUp() + # NOTE(morganfainberg): unregister must be added to cleanup before + # reset is because cleanup works in reverse order of registered items, + # and a reset must occur before unregistering options can occur. + self.addCleanup(self._unregister_config_opts) + self.addCleanup(self.conf.reset) + self._registered_config_opts = {} + + def config(self, **kw): + """Override configuration values. + + The keyword arguments are the names of configuration options to + override and their values. + + If a `group` argument is supplied, the overrides are applied to + the specified configuration option group, otherwise the overrides + are applied to the ``default`` group. + + """ + + group = kw.pop('group', None) + for k, v in six.iteritems(kw): + self.conf.set_override(k, v, group) + + def _unregister_config_opts(self): + for group in self._registered_config_opts: + self.conf.unregister_opts(self._registered_config_opts[group], + group=group) + + def register_opt(self, opt, group=None): + """Register a single option for the test run. + + Options registered in this manner will automatically be unregistered + during cleanup. + + If a `group` argument is supplied, it will register the new option + to that group, otherwise the option is registered to the ``default`` + group. + """ + self.conf.register_opt(opt, group=group) + self._registered_config_opts.setdefault(group, set()).add(opt) + + def register_opts(self, opts, group=None): + """Register multiple options for the test run. + + This works in the same manner as register_opt() but takes a list of + options as the first argument. All arguments will be registered to the + same group if the ``group`` argument is supplied, otherwise all options + will be registered to the ``default`` group. + """ + for opt in opts: + self.register_opt(opt, group=group) diff --git a/rack/openstack/common/fixture/lockutils.py b/rack/openstack/common/fixture/lockutils.py new file mode 100644 index 0000000..f8e89ea --- /dev/null +++ b/rack/openstack/common/fixture/lockutils.py @@ -0,0 +1,51 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
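To show how the Config fixture above is meant to be used, here is a minimal sketch of a test case; the option name is invented for the example, and everything registered or overridden is undone by the fixture's cleanups.

    from oslo.config import cfg
    import testtools

    from rack.openstack.common.fixture import config


    class ConfigExampleTest(testtools.TestCase):
        def setUp(self):
            super(ConfigExampleTest, self).setUp()
            self.cfg_fixture = self.useFixture(config.Config(cfg.CONF))

        def test_override(self):
            # Register a throwaway option and override it for this test only.
            self.cfg_fixture.register_opt(cfg.StrOpt('example_opt',
                                                     default='off'))
            self.cfg_fixture.config(example_opt='on')
            self.assertEqual('on', cfg.CONF.example_opt)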
+ +import fixtures + +from rack.openstack.common import lockutils + + +class LockFixture(fixtures.Fixture): + """External locking fixture. + + This fixture is basically an alternative to the synchronized decorator with + the external flag so that tearDowns and addCleanups will be included in + the lock context for locking between tests. The fixture is recommended to + be the first line in a test method, like so:: + + def test_method(self): + self.useFixture(LockFixture) + ... + + or the first line in setUp if all the test methods in the class are + required to be serialized. Something like:: + + class TestCase(testtools.testcase): + def setUp(self): + self.useFixture(LockFixture) + super(TestCase, self).setUp() + ... + + This is because addCleanups are put on a LIFO queue that gets run after the + test method exits. (either by completing or raising an exception) + """ + def __init__(self, name, lock_file_prefix=None): + self.mgr = lockutils.lock(name, lock_file_prefix, True) + + def setUp(self): + super(LockFixture, self).setUp() + self.addCleanup(self.mgr.__exit__, None, None, None) + self.mgr.__enter__() diff --git a/rack/openstack/common/fixture/logging.py b/rack/openstack/common/fixture/logging.py new file mode 100644 index 0000000..3823a03 --- /dev/null +++ b/rack/openstack/common/fixture/logging.py @@ -0,0 +1,34 @@ +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import fixtures + + +def get_logging_handle_error_fixture(): + """returns a fixture to make logging raise formatting exceptions. + + Usage: + self.useFixture(logging.get_logging_handle_error_fixture()) + """ + return fixtures.MonkeyPatch('logging.Handler.handleError', + _handleError) + + +def _handleError(self, record): + """Monkey patch for logging.Handler.handleError. + + The default handleError just logs the error to stderr but we want + the option of actually raising an exception. + """ + raise diff --git a/rack/openstack/common/fixture/mockpatch.py b/rack/openstack/common/fixture/mockpatch.py new file mode 100644 index 0000000..a8ffeb3 --- /dev/null +++ b/rack/openstack/common/fixture/mockpatch.py @@ -0,0 +1,51 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
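A brief sketch of the LockFixture above serializing every test in a class, following the usage its docstring describes; the lock name is arbitrary, and a location for the external lock file (CONF.lock_path) is assumed to be configured.

    import testtools

    from rack.openstack.common.fixture import lockutils as lockutils_fixture


    class SerializedTest(testtools.TestCase):
        def setUp(self):
            # The fixture takes a lock name; the external lock is acquired
            # here and released by the fixture's addCleanup.
            self.useFixture(lockutils_fixture.LockFixture('example-lock'))
            super(SerializedTest, self).setUp()

        def test_exclusive_section(self):
            # This body runs while the external lock is held.
            pass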
+ +import fixtures +import mock + + +class PatchObject(fixtures.Fixture): + """Deal with code around mock.""" + + def __init__(self, obj, attr, new=mock.DEFAULT, **kwargs): + self.obj = obj + self.attr = attr + self.kwargs = kwargs + self.new = new + + def setUp(self): + super(PatchObject, self).setUp() + _p = mock.patch.object(self.obj, self.attr, self.new, **self.kwargs) + self.mock = _p.start() + self.addCleanup(_p.stop) + + +class Patch(fixtures.Fixture): + + """Deal with code around mock.patch.""" + + def __init__(self, obj, new=mock.DEFAULT, **kwargs): + self.obj = obj + self.kwargs = kwargs + self.new = new + + def setUp(self): + super(Patch, self).setUp() + _p = mock.patch(self.obj, self.new, **self.kwargs) + self.mock = _p.start() + self.addCleanup(_p.stop) diff --git a/rack/openstack/common/fixture/moxstubout.py b/rack/openstack/common/fixture/moxstubout.py new file mode 100644 index 0000000..d7e118e --- /dev/null +++ b/rack/openstack/common/fixture/moxstubout.py @@ -0,0 +1,32 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import fixtures +from six.moves import mox # noqa + + +class MoxStubout(fixtures.Fixture): + """Deal with code around mox and stubout as a fixture.""" + + def setUp(self): + super(MoxStubout, self).setUp() + # emulate some of the mox stuff, we can't use the metaclass + # because it screws with our generators + self.mox = mox.Mox() + self.stubs = self.mox.stubs + self.addCleanup(self.mox.UnsetStubs) + self.addCleanup(self.mox.VerifyAll) diff --git a/rack/openstack/common/gettextutils.py b/rack/openstack/common/gettextutils.py new file mode 100644 index 0000000..a5748ab --- /dev/null +++ b/rack/openstack/common/gettextutils.py @@ -0,0 +1,474 @@ +# Copyright 2012 Red Hat, Inc. +# Copyright 2013 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +gettext for openstack-common modules. 
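As an illustration of the mockpatch fixtures above, a minimal sketch that patches a standard-library attribute; the patched target and value are chosen only for demonstration.

    import time

    import testtools

    from rack.openstack.common.fixture import mockpatch


    class PatchExampleTest(testtools.TestCase):
        def test_patched_time(self):
            # mock.patch is started in the fixture's setUp and stopped via
            # addCleanup, so the patch cannot leak into other tests.
            self.useFixture(mockpatch.Patch('time.time', new=lambda: 1234.5))
            self.assertEqual(1234.5, time.time())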
+ +Usual usage in an openstack.common module: + + from rack.openstack.common.gettextutils import _ +""" + +import copy +import functools +import gettext +import locale +from logging import handlers +import os +import re + +from babel import localedata +import six + +_localedir = os.environ.get('rack'.upper() + '_LOCALEDIR') +_t = gettext.translation('rack', localedir=_localedir, fallback=True) + +# We use separate translation catalogs for each log level, so set up a +# mapping between the log level name and the translator. The domain +# for the log level is project_name + "-log-" + log_level so messages +# for each level end up in their own catalog. +_t_log_levels = dict( + (level, gettext.translation('rack' + '-log-' + level, + localedir=_localedir, + fallback=True)) + for level in ['info', 'warning', 'error', 'critical'] +) + +_AVAILABLE_LANGUAGES = {} +USE_LAZY = False + + +def enable_lazy(): + """Convenience function for configuring _() to use lazy gettext + + Call this at the start of execution to enable the gettextutils._ + function to use lazy gettext functionality. This is useful if + your project is importing _ directly instead of using the + gettextutils.install() way of importing the _ function. + """ + global USE_LAZY + USE_LAZY = True + + +def _(msg): + if USE_LAZY: + return Message(msg, domain='rack') + else: + if six.PY3: + return _t.gettext(msg) + return _t.ugettext(msg) + + +def _log_translation(msg, level): + """Build a single translation of a log message + """ + if USE_LAZY: + return Message(msg, domain='rack' + '-log-' + level) + else: + translator = _t_log_levels[level] + if six.PY3: + return translator.gettext(msg) + return translator.ugettext(msg) + +# Translators for log levels. +# +# The abbreviated names are meant to reflect the usual use of a short +# name like '_'. The "L" is for "log" and the other letter comes from +# the level. +_LI = functools.partial(_log_translation, level='info') +_LW = functools.partial(_log_translation, level='warning') +_LE = functools.partial(_log_translation, level='error') +_LC = functools.partial(_log_translation, level='critical') + + +def install(domain, lazy=False): + """Install a _() function using the given translation domain. + + Given a translation domain, install a _() function using gettext's + install() function. + + The main difference from gettext.install() is that we allow + overriding the default localedir (e.g. /usr/share/locale) using + a translation-domain-specific environment variable (e.g. + NOVA_LOCALEDIR). + + :param domain: the translation domain + :param lazy: indicates whether or not to install the lazy _() function. + The lazy _() introduces a way to do deferred translation + of messages by installing a _ that builds Message objects, + instead of strings, which can then be lazily translated into + any available locale. + """ + if lazy: + # NOTE(mrodden): Lazy gettext functionality. + # + # The following introduces a deferred way to do translations on + # messages in OpenStack. We override the standard _() function + # and % (format string) operation to build Message objects that can + # later be translated when we have more information. + def _lazy_gettext(msg): + """Create and return a Message object. + + Lazy gettext function for a given domain, it is a factory method + for a project/module to get a lazy gettext function for its own + translation domain (i.e. rack, glance, cinder, etc.) + + Message encapsulates a string so that we can translate + it later when needed. 
+ """ + return Message(msg, domain=domain) + + from six import moves + moves.builtins.__dict__['_'] = _lazy_gettext + else: + localedir = '%s_LOCALEDIR' % domain.upper() + if six.PY3: + gettext.install(domain, + localedir=os.environ.get(localedir)) + else: + gettext.install(domain, + localedir=os.environ.get(localedir), + unicode=True) + + +class Message(six.text_type): + """A Message object is a unicode object that can be translated. + + Translation of Message is done explicitly using the translate() method. + For all non-translation intents and purposes, a Message is simply unicode, + and can be treated as such. + """ + + def __new__(cls, msgid, msgtext=None, params=None, + domain='rack', *args): + """Create a new Message object. + + In order for translation to work gettext requires a message ID, this + msgid will be used as the base unicode text. It is also possible + for the msgid and the base unicode text to be different by passing + the msgtext parameter. + """ + # If the base msgtext is not given, we use the default translation + # of the msgid (which is in English) just in case the system locale is + # not English, so that the base text will be in that locale by default. + if not msgtext: + msgtext = Message._translate_msgid(msgid, domain) + # We want to initialize the parent unicode with the actual object that + # would have been plain unicode if 'Message' was not enabled. + msg = super(Message, cls).__new__(cls, msgtext) + msg.msgid = msgid + msg.domain = domain + msg.params = params + return msg + + def translate(self, desired_locale=None): + """Translate this message to the desired locale. + + :param desired_locale: The desired locale to translate the message to, + if no locale is provided the message will be + translated to the system's default locale. + + :returns: the translated message in unicode + """ + + translated_message = Message._translate_msgid(self.msgid, + self.domain, + desired_locale) + if self.params is None: + # No need for more translation + return translated_message + + # This Message object may have been formatted with one or more + # Message objects as substitution arguments, given either as a single + # argument, part of a tuple, or as one or more values in a dictionary. + # When translating this Message we need to translate those Messages too + translated_params = _translate_args(self.params, desired_locale) + + translated_message = translated_message % translated_params + + return translated_message + + @staticmethod + def _translate_msgid(msgid, domain, desired_locale=None): + if not desired_locale: + system_locale = locale.getdefaultlocale() + # If the system locale is not available to the runtime use English + if not system_locale[0]: + desired_locale = 'en_US' + else: + desired_locale = system_locale[0] + + locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR') + lang = gettext.translation(domain, + localedir=locale_dir, + languages=[desired_locale], + fallback=True) + if six.PY3: + translator = lang.gettext + else: + translator = lang.ugettext + + translated_message = translator(msgid) + return translated_message + + def __mod__(self, other): + # When we mod a Message we want the actual operation to be performed + # by the parent class (i.e. 
unicode()), the only thing we do here is + # save the original msgid and the parameters in case of a translation + params = self._sanitize_mod_params(other) + unicode_mod = super(Message, self).__mod__(params) + modded = Message(self.msgid, + msgtext=unicode_mod, + params=params, + domain=self.domain) + return modded + + def _sanitize_mod_params(self, other): + """Sanitize the object being modded with this Message. + + - Add support for modding 'None' so translation supports it + - Trim the modded object, which can be a large dictionary, to only + those keys that would actually be used in a translation + - Snapshot the object being modded, in case the message is + translated, it will be used as it was when the Message was created + """ + if other is None: + params = (other,) + elif isinstance(other, dict): + params = self._trim_dictionary_parameters(other) + else: + params = self._copy_param(other) + return params + + def _trim_dictionary_parameters(self, dict_param): + """Return a dict that only has matching entries in the msgid.""" + # NOTE(luisg): Here we trim down the dictionary passed as parameters + # to avoid carrying a lot of unnecessary weight around in the message + # object, for example if someone passes in Message() % locals() but + # only some params are used, and additionally we prevent errors for + # non-deepcopyable objects by unicoding() them. + + # Look for %(param) keys in msgid; + # Skip %% and deal with the case where % is first character on the line + keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', self.msgid) + + # If we don't find any %(param) keys but have a %s + if not keys and re.findall('(?:[^%]|^)%[a-z]', self.msgid): + # Apparently the full dictionary is the parameter + params = self._copy_param(dict_param) + else: + params = {} + # Save our existing parameters as defaults to protect + # ourselves from losing values if we are called through an + # (erroneous) chain that builds a valid Message with + # arguments, and then does something like "msg % kwds" + # where kwds is an empty dictionary. + src = {} + if isinstance(self.params, dict): + src.update(self.params) + src.update(dict_param) + for key in keys: + params[key] = self._copy_param(src[key]) + + return params + + def _copy_param(self, param): + try: + return copy.deepcopy(param) + except TypeError: + # Fallback to casting to unicode this will handle the + # python code-like objects that can't be deep-copied + return six.text_type(param) + + def __add__(self, other): + msg = _('Message objects do not support addition.') + raise TypeError(msg) + + def __radd__(self, other): + return self.__add__(other) + + def __str__(self): + # NOTE(luisg): Logging in python 2.6 tries to str() log records, + # and it expects specifically a UnicodeError in order to proceed. + msg = _('Message objects do not support str() because they may ' + 'contain non-ascii characters. ' + 'Please use unicode() or translate() instead.') + raise UnicodeError(msg) + + +def get_available_languages(domain): + """Lists the available languages for the given translation domain. 
+ + :param domain: the domain to get languages for + """ + if domain in _AVAILABLE_LANGUAGES: + return copy.copy(_AVAILABLE_LANGUAGES[domain]) + + localedir = '%s_LOCALEDIR' % domain.upper() + find = lambda x: gettext.find(domain, + localedir=os.environ.get(localedir), + languages=[x]) + + # NOTE(mrodden): en_US should always be available (and first in case + # order matters) since our in-line message strings are en_US + language_list = ['en_US'] + # NOTE(luisg): Babel <1.0 used a function called list(), which was + # renamed to locale_identifiers() in >=1.0, the requirements master list + # requires >=0.9.6, uncapped, so defensively work with both. We can remove + # this check when the master list updates to >=1.0, and update all projects + list_identifiers = (getattr(localedata, 'list', None) or + getattr(localedata, 'locale_identifiers')) + locale_identifiers = list_identifiers() + + for i in locale_identifiers: + if find(i) is not None: + language_list.append(i) + + # NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported + # locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they + # are perfectly legitimate locales: + # https://github.com/mitsuhiko/babel/issues/37 + # In Babel 1.3 they fixed the bug and they support these locales, but + # they are still not explicitly "listed" by locale_identifiers(). + # That is why we add the locales here explicitly if necessary so that + # they are listed as supported. + aliases = {'zh': 'zh_CN', + 'zh_Hant_HK': 'zh_HK', + 'zh_Hant': 'zh_TW', + 'fil': 'tl_PH'} + for (locale, alias) in six.iteritems(aliases): + if locale in language_list and alias not in language_list: + language_list.append(alias) + + _AVAILABLE_LANGUAGES[domain] = language_list + return copy.copy(language_list) + + +def translate(obj, desired_locale=None): + """Gets the translated unicode representation of the given object. + + If the object is not translatable it is returned as-is. + If the locale is None the object is translated to the system locale. + + :param obj: the object to translate + :param desired_locale: the locale to translate the message to, if None the + default system locale will be used + :returns: the translated object in unicode, or the original object if + it could not be translated + """ + message = obj + if not isinstance(message, Message): + # If the object to translate is not already translatable, + # let's first get its unicode representation + message = six.text_type(obj) + if isinstance(message, Message): + # Even after unicoding() we still need to check if we are + # running with translatable unicode before translating + return message.translate(desired_locale) + return obj + + +def _translate_args(args, desired_locale=None): + """Translates all the translatable elements of the given arguments object. + + This method is used for translating the translatable values in method + arguments which include values of tuples or dictionaries. + If the object is not a tuple or a dictionary the object itself is + translated if it is translatable. + + If the locale is None the object is translated to the system locale. 
+ + :param args: the args to translate + :param desired_locale: the locale to translate the args to, if None the + default system locale will be used + :returns: a new args object with the translated contents of the original + """ + if isinstance(args, tuple): + return tuple(translate(v, desired_locale) for v in args) + if isinstance(args, dict): + translated_dict = {} + for (k, v) in six.iteritems(args): + translated_v = translate(v, desired_locale) + translated_dict[k] = translated_v + return translated_dict + return translate(args, desired_locale) + + +class TranslationHandler(handlers.MemoryHandler): + """Handler that translates records before logging them. + + The TranslationHandler takes a locale and a target logging.Handler object + to forward LogRecord objects to after translating them. This handler + depends on Message objects being logged, instead of regular strings. + + The handler can be configured declaratively in the logging.conf as follows: + + [handlers] + keys = translatedlog, translator + + [handler_translatedlog] + class = handlers.WatchedFileHandler + args = ('/var/log/api-localized.log',) + formatter = context + + [handler_translator] + class = openstack.common.log.TranslationHandler + target = translatedlog + args = ('zh_CN',) + + If the specified locale is not available in the system, the handler will + log in the default locale. + """ + + def __init__(self, locale=None, target=None): + """Initialize a TranslationHandler + + :param locale: locale to use for translating messages + :param target: logging.Handler object to forward + LogRecord objects to after translation + """ + # NOTE(luisg): In order to allow this handler to be a wrapper for + # other handlers, such as a FileHandler, and still be able to + # configure it using logging.conf, this handler has to extend + # MemoryHandler because only the MemoryHandlers' logging.conf + # parsing is implemented such that it accepts a target handler. + handlers.MemoryHandler.__init__(self, capacity=0, target=target) + self.locale = locale + + def setFormatter(self, fmt): + self.target.setFormatter(fmt) + + def emit(self, record): + # We save the message from the original record to restore it + # after translation, so other handlers are not affected by this + original_msg = record.msg + original_args = record.args + + try: + self._translate_and_log_record(record) + finally: + record.msg = original_msg + record.args = original_args + + def _translate_and_log_record(self, record): + record.msg = translate(record.msg, self.locale) + + # In addition to translating the message, we also need to translate + # arguments that were passed to the log method that were not part + # of the main message e.g., log.info(_('Some message %s'), this_one)) + record.args = _translate_args(record.args, self.locale) + + self.target.emit(record) diff --git a/rack/openstack/common/imageutils.py b/rack/openstack/common/imageutils.py new file mode 100644 index 0000000..7b3f94d --- /dev/null +++ b/rack/openstack/common/imageutils.py @@ -0,0 +1,144 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
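To make the lazy-translation machinery above concrete, here is a hedged sketch of deferred translation with the Message class; the message text, parameter, and locale are examples only.

    from rack.openstack.common import gettextutils
    from rack.openstack.common.gettextutils import _

    # Opt in to lazy translation so _() returns Message objects rather than
    # plain unicode strings.
    gettextutils.enable_lazy()

    msg = _('Resource %(name)s not found') % {'name': 'proc-01'}

    # The Message keeps its msgid and parameters, so it can be rendered in
    # any available locale later (falling back to the msgid text if no
    # catalog is found).
    print(msg.translate('en_US'))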
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Helper methods to deal with images. +""" + +import re + +from rack.openstack.common.gettextutils import _ # noqa +from rack.openstack.common import strutils + + +class QemuImgInfo(object): + BACKING_FILE_RE = re.compile((r"^(.*?)\s*\(actual\s+path\s*:" + r"\s+(.*?)\)\s*$"), re.I) + TOP_LEVEL_RE = re.compile(r"^([\w\d\s\_\-]+):(.*)$") + SIZE_RE = re.compile(r"\(\s*(\d+)\s+bytes\s*\)", re.I) + + def __init__(self, cmd_output=None): + details = self._parse(cmd_output or '') + self.image = details.get('image') + self.backing_file = details.get('backing_file') + self.file_format = details.get('file_format') + self.virtual_size = details.get('virtual_size') + self.cluster_size = details.get('cluster_size') + self.disk_size = details.get('disk_size') + self.snapshots = details.get('snapshot_list', []) + self.encryption = details.get('encryption') + + def __str__(self): + lines = [ + 'image: %s' % self.image, + 'file_format: %s' % self.file_format, + 'virtual_size: %s' % self.virtual_size, + 'disk_size: %s' % self.disk_size, + 'cluster_size: %s' % self.cluster_size, + 'backing_file: %s' % self.backing_file, + ] + if self.snapshots: + lines.append("snapshots: %s" % self.snapshots) + return "\n".join(lines) + + def _canonicalize(self, field): + # Standardize on underscores/lc/no dash and no spaces + # since qemu seems to have mixed outputs here... and + # this format allows for better integration with python + # - ie for usage in kwargs and such... 
+ field = field.lower().strip() + for c in (" ", "-"): + field = field.replace(c, '_') + return field + + def _extract_bytes(self, details): + # Replace it with the byte amount + real_size = self.SIZE_RE.search(details) + if real_size: + details = real_size.group(1) + try: + details = strutils.to_bytes(details) + except TypeError: + pass + return details + + def _extract_details(self, root_cmd, root_details, lines_after): + real_details = root_details + if root_cmd == 'backing_file': + # Replace it with the real backing file + backing_match = self.BACKING_FILE_RE.match(root_details) + if backing_match: + real_details = backing_match.group(2).strip() + elif root_cmd in ['virtual_size', 'cluster_size', 'disk_size']: + # Replace it with the byte amount (if we can convert it) + real_details = self._extract_bytes(root_details) + elif root_cmd == 'file_format': + real_details = real_details.strip().lower() + elif root_cmd == 'snapshot_list': + # Next line should be a header, starting with 'ID' + if not lines_after or not lines_after[0].startswith("ID"): + msg = _("Snapshot list encountered but no header found!") + raise ValueError(msg) + del lines_after[0] + real_details = [] + # This is the sprintf pattern we will try to match + # "%-10s%-20s%7s%20s%15s" + # ID TAG VM SIZE DATE VM CLOCK (current header) + while lines_after: + line = lines_after[0] + line_pieces = line.split() + if len(line_pieces) != 6: + break + # Check against this pattern in the final position + # "%02d:%02d:%02d.%03d" + date_pieces = line_pieces[5].split(":") + if len(date_pieces) != 3: + break + real_details.append({ + 'id': line_pieces[0], + 'tag': line_pieces[1], + 'vm_size': line_pieces[2], + 'date': line_pieces[3], + 'vm_clock': line_pieces[4] + " " + line_pieces[5], + }) + del lines_after[0] + return real_details + + def _parse(self, cmd_output): + # Analysis done of qemu-img.c to figure out what is going on here + # Find all points start with some chars and then a ':' then a newline + # and then handle the results of those 'top level' items in a separate + # function. + # + # TODO(harlowja): newer versions might have a json output format + # we should switch to that whenever possible. + # see: http://bit.ly/XLJXDX + contents = {} + lines = [x for x in cmd_output.splitlines() if x.strip()] + while lines: + line = lines.pop(0) + top_level = self.TOP_LEVEL_RE.match(line) + if top_level: + root = self._canonicalize(top_level.group(1)) + if not root: + continue + root_details = top_level.group(2).strip() + details = self._extract_details(root, root_details, lines) + contents[root] = details + return contents diff --git a/rack/openstack/common/importutils.py b/rack/openstack/common/importutils.py new file mode 100644 index 0000000..4fd9ae2 --- /dev/null +++ b/rack/openstack/common/importutils.py @@ -0,0 +1,66 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Import related utilities and helper functions. 
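A short sketch of feeding qemu-img output to the QemuImgInfo parser above; the sample output is abbreviated and invented, but follows the "key: value" shape the regular expressions expect.

    from rack.openstack.common import imageutils

    sample = (
        "image: disk.qcow2\n"
        "file format: qcow2\n"
        "backing file: base.qcow2 (actual path: /var/lib/images/base.qcow2)\n"
    )

    info = imageutils.QemuImgInfo(sample)
    # Field names are canonicalized to lower case with underscores, and the
    # backing file is reduced to its actual path.
    print(info.file_format)   # qcow2
    print(info.backing_file)  # /var/lib/images/base.qcow2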
+""" + +import sys +import traceback + + +def import_class(import_str): + """Returns a class from a string including module and class.""" + mod_str, _sep, class_str = import_str.rpartition('.') + try: + __import__(mod_str) + return getattr(sys.modules[mod_str], class_str) + except (ValueError, AttributeError): + raise ImportError('Class %s cannot be found (%s)' % + (class_str, + traceback.format_exception(*sys.exc_info()))) + + +def import_object(import_str, *args, **kwargs): + """Import a class and return an instance of it.""" + return import_class(import_str)(*args, **kwargs) + + +def import_object_ns(name_space, import_str, *args, **kwargs): + """Tries to import object from default namespace. + + Imports a class and return an instance of it, first by trying + to find the class in a default namespace, then failing back to + a full path if not found in the default namespace. + """ + import_value = "%s.%s" % (name_space, import_str) + try: + return import_class(import_value)(*args, **kwargs) + except ImportError: + return import_class(import_str)(*args, **kwargs) + + +def import_module(import_str): + """Import a module.""" + __import__(import_str) + return sys.modules[import_str] + + +def try_import(import_str, default=None): + """Try to import a module and if it fails return default.""" + try: + return import_module(import_str) + except ImportError: + return default diff --git a/rack/openstack/common/jsonutils.py b/rack/openstack/common/jsonutils.py new file mode 100644 index 0000000..6fdd5ac --- /dev/null +++ b/rack/openstack/common/jsonutils.py @@ -0,0 +1,178 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +''' +JSON related utilities. + +This module provides a few things: + + 1) A handy function for getting an object down to something that can be + JSON serialized. See to_primitive(). + + 2) Wrappers around loads() and dumps(). The dumps() wrapper will + automatically use to_primitive() for you if needed. + + 3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson + is available. 
+''' + + +import datetime +import functools +import inspect +import itertools +import json +try: + import xmlrpclib +except ImportError: + # NOTE(jd): xmlrpclib is not shipped with Python 3 + xmlrpclib = None + +import six + +from rack.openstack.common import gettextutils +from rack.openstack.common import importutils +from rack.openstack.common import timeutils + +netaddr = importutils.try_import("netaddr") + +_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod, + inspect.isfunction, inspect.isgeneratorfunction, + inspect.isgenerator, inspect.istraceback, inspect.isframe, + inspect.iscode, inspect.isbuiltin, inspect.isroutine, + inspect.isabstract] + +_simple_types = (six.string_types + six.integer_types + + (type(None), bool, float)) + + +def to_primitive(value, convert_instances=False, convert_datetime=True, + level=0, max_depth=3): + """Convert a complex object into primitives. + + Handy for JSON serialization. We can optionally handle instances, + but since this is a recursive function, we could have cyclical + data structures. + + To handle cyclical data structures we could track the actual objects + visited in a set, but not all objects are hashable. Instead we just + track the depth of the object inspections and don't go too deep. + + Therefore, convert_instances=True is lossy ... be aware. + + """ + # handle obvious types first - order of basic types determined by running + # full tests on rack project, resulting in the following counts: + # 572754 + # 460353 + # 379632 + # 274610 + # 199918 + # 114200 + # 51817 + # 26164 + # 6491 + # 283 + # 19 + if isinstance(value, _simple_types): + return value + + if isinstance(value, datetime.datetime): + if convert_datetime: + return timeutils.strtime(value) + else: + return value + + # value of itertools.count doesn't get caught by nasty_type_tests + # and results in infinite loop when list(value) is called. + if type(value) == itertools.count: + return six.text_type(value) + + # FIXME(vish): Workaround for LP bug 852095. Without this workaround, + # tests that raise an exception in a mocked method that + # has a @wrap_exception with a notifier will fail. If + # we up the dependency to 0.5.4 (when it is released) we + # can remove this workaround. + if getattr(value, '__module__', None) == 'mox': + return 'mock' + + if level > max_depth: + return '?' + + # The try block may not be necessary after the class check above, + # but just in case ... + try: + recursive = functools.partial(to_primitive, + convert_instances=convert_instances, + convert_datetime=convert_datetime, + level=level, + max_depth=max_depth) + if isinstance(value, dict): + return dict((k, recursive(v)) for k, v in six.iteritems(value)) + elif isinstance(value, (list, tuple)): + return [recursive(lv) for lv in value] + + # It's not clear why xmlrpclib created their own DateTime type, but + # for our purposes, make it a datetime type which is explicitly + # handled + if xmlrpclib and isinstance(value, xmlrpclib.DateTime): + value = datetime.datetime(*tuple(value.timetuple())[:6]) + + if convert_datetime and isinstance(value, datetime.datetime): + return timeutils.strtime(value) + elif isinstance(value, gettextutils.Message): + return value.data + elif hasattr(value, 'iteritems'): + return recursive(dict(value.iteritems()), level=level + 1) + elif hasattr(value, '__iter__'): + return recursive(list(value)) + elif convert_instances and hasattr(value, '__dict__'): + # Likely an instance of something. Watch for cycles. + # Ignore class member vars. 
+ return recursive(value.__dict__, level=level + 1) + elif netaddr and isinstance(value, netaddr.IPAddress): + return six.text_type(value) + else: + if any(test(value) for test in _nasty_type_tests): + return six.text_type(value) + return value + except TypeError: + # Class objects are tricky since they may define something like + # __iter__ defined but it isn't callable as list(). + return six.text_type(value) + + +def dumps(value, default=to_primitive, **kwargs): + return json.dumps(value, default=default, **kwargs) + + +def loads(s): + return json.loads(s) + + +def load(s): + return json.load(s) + + +try: + import anyjson +except ImportError: + pass +else: + anyjson._modules.append((__name__, 'dumps', TypeError, + 'loads', ValueError, 'load')) + anyjson.force_implementation(__name__) diff --git a/rack/openstack/common/local.py b/rack/openstack/common/local.py new file mode 100644 index 0000000..0819d5b --- /dev/null +++ b/rack/openstack/common/local.py @@ -0,0 +1,45 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Local storage of variables using weak references""" + +import threading +import weakref + + +class WeakLocal(threading.local): + def __getattribute__(self, attr): + rval = super(WeakLocal, self).__getattribute__(attr) + if rval: + # NOTE(mikal): this bit is confusing. What is stored is a weak + # reference, not the value itself. We therefore need to lookup + # the weak reference and return the inner value here. + rval = rval() + return rval + + def __setattr__(self, attr, value): + value = weakref.ref(value) + return super(WeakLocal, self).__setattr__(attr, value) + + +# NOTE(mikal): the name "store" should be deprecated in the future +store = WeakLocal() + +# A "weak" store uses weak references and allows an object to fall out of scope +# when it falls out of scope in the code that uses the thread local storage. A +# "strong" store will hold a reference to the object so that it never falls out +# of scope. +weak_store = WeakLocal() +strong_store = threading.local() diff --git a/rack/openstack/common/lockutils.py b/rack/openstack/common/lockutils.py new file mode 100644 index 0000000..163bec0 --- /dev/null +++ b/rack/openstack/common/lockutils.py @@ -0,0 +1,303 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
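To illustrate the serialization helpers above, a minimal sketch of dumping an object graph that the plain json module would reject; the payload is invented.

    import datetime

    from rack.openstack.common import jsonutils

    payload = {
        'name': 'proc-01',
        'launched_at': datetime.datetime(2014, 7, 9, 11, 11, 35),
        'tags': set(['web', 'worker']),
    }

    # dumps() routes unserializable values through to_primitive(): the
    # datetime becomes a string and the set is rendered as a list via its
    # __iter__.
    print(jsonutils.dumps(payload))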
+ + +import contextlib +import errno +import functools +import os +import shutil +import subprocess +import sys +import tempfile +import threading +import time +import weakref + +from oslo.config import cfg + +from rack.openstack.common import fileutils +from rack.openstack.common.gettextutils import _ # noqa +from rack.openstack.common import local +from rack.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +util_opts = [ + cfg.BoolOpt('disable_process_locking', default=False, + help='Whether to disable inter-process locks'), + cfg.StrOpt('lock_path', + default=os.environ.get("NOVA_LOCK_PATH"), + help=('Directory to use for lock files.')) +] + + +CONF = cfg.CONF +CONF.register_opts(util_opts) + + +def set_defaults(lock_path): + cfg.set_defaults(util_opts, lock_path=lock_path) + + +class _InterProcessLock(object): + """Lock implementation which allows multiple locks, working around + issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does + not require any cleanup. Since the lock is always held on a file + descriptor rather than outside of the process, the lock gets dropped + automatically if the process crashes, even if __exit__ is not executed. + + There are no guarantees regarding usage by multiple green threads in a + single process here. This lock works only between processes. Exclusive + access between local threads should be achieved using the semaphores + in the @synchronized decorator. + + Note these locks are released when the descriptor is closed, so it's not + safe to close the file descriptor while another green thread holds the + lock. Just opening and closing the lock file can break synchronisation, + so lock files must be accessed only using this abstraction. + """ + + def __init__(self, name): + self.lockfile = None + self.fname = name + + def __enter__(self): + self.lockfile = open(self.fname, 'w') + + while True: + try: + # Using non-blocking locks since green threads are not + # patched to deal with blocking locking calls. + # Also upon reading the MSDN docs for locking(), it seems + # to have a laughable 10 attempts "blocking" mechanism. 
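As a rough usage sketch (not part of the patch): two processes that point this class at the same lock file serialize on it, and the lock disappears with the file descriptor if either process dies. The lock-file path below is an arbitrary example.

    from rack.openstack.common import lockutils

    # InterProcessLock is the platform-specific alias assigned further down
    # in this module (fcntl-based on POSIX, msvcrt-based on Windows).
    lock = lockutils.InterProcessLock('/tmp/rack-example.lock')
    with lock:
        # Only one process on this host can be in this block at a time; a
        # crash here releases the lock along with the file descriptor, so
        # there is no stale lock to clean up.
        print('doing exclusive work')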
+ self.trylock() + return self + except IOError as e: + if e.errno in (errno.EACCES, errno.EAGAIN): + # external locks synchronise things like iptables + # updates - give it some time to prevent busy spinning + time.sleep(0.01) + else: + raise + + def __exit__(self, exc_type, exc_val, exc_tb): + try: + self.unlock() + self.lockfile.close() + except IOError: + LOG.exception(_("Could not release the acquired lock `%s`"), + self.fname) + + def trylock(self): + raise NotImplementedError() + + def unlock(self): + raise NotImplementedError() + + +class _WindowsLock(_InterProcessLock): + def trylock(self): + msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1) + + def unlock(self): + msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1) + + +class _PosixLock(_InterProcessLock): + def trylock(self): + fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB) + + def unlock(self): + fcntl.lockf(self.lockfile, fcntl.LOCK_UN) + + +if os.name == 'nt': + import msvcrt + InterProcessLock = _WindowsLock +else: + import fcntl + InterProcessLock = _PosixLock + +_semaphores = weakref.WeakValueDictionary() +_semaphores_lock = threading.Lock() + + +@contextlib.contextmanager +def lock(name, lock_file_prefix=None, external=False, lock_path=None): + """Context based lock + + This function yields a `threading.Semaphore` instance (if we don't use + eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is + True, in which case, it'll yield an InterProcessLock instance. + + :param lock_file_prefix: The lock_file_prefix argument is used to provide + lock files on disk with a meaningful prefix. + + :param external: The external keyword argument denotes whether this lock + should work across multiple processes. This means that if two different + workers both run a a method decorated with @synchronized('mylock', + external=True), only one of them will execute at a time. + + :param lock_path: The lock_path keyword argument is used to specify a + special location for external lock files to live. If nothing is set, then + CONF.lock_path is used as a default. 
+ """ + with _semaphores_lock: + try: + sem = _semaphores[name] + except KeyError: + sem = threading.Semaphore() + _semaphores[name] = sem + + with sem: + LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name}) + + # NOTE(mikal): I know this looks odd + if not hasattr(local.strong_store, 'locks_held'): + local.strong_store.locks_held = [] + local.strong_store.locks_held.append(name) + + try: + if external and not CONF.disable_process_locking: + LOG.debug(_('Attempting to grab file lock "%(lock)s"'), + {'lock': name}) + + # We need a copy of lock_path because it is non-local + local_lock_path = lock_path or CONF.lock_path + if not local_lock_path: + raise cfg.RequiredOptError('lock_path') + + if not os.path.exists(local_lock_path): + fileutils.ensure_tree(local_lock_path) + LOG.info(_('Created lock path: %s'), local_lock_path) + + def add_prefix(name, prefix): + if not prefix: + return name + sep = '' if prefix.endswith('-') else '-' + return '%s%s%s' % (prefix, sep, name) + + # NOTE(mikal): the lock name cannot contain directory + # separators + lock_file_name = add_prefix(name.replace(os.sep, '_'), + lock_file_prefix) + + lock_file_path = os.path.join(local_lock_path, lock_file_name) + + try: + lock = InterProcessLock(lock_file_path) + with lock as lock: + LOG.debug(_('Got file lock "%(lock)s" at %(path)s'), + {'lock': name, 'path': lock_file_path}) + yield lock + finally: + LOG.debug(_('Released file lock "%(lock)s" at %(path)s'), + {'lock': name, 'path': lock_file_path}) + else: + yield sem + + finally: + local.strong_store.locks_held.remove(name) + + +def synchronized(name, lock_file_prefix=None, external=False, lock_path=None): + """Synchronization decorator. + + Decorating a method like so:: + + @synchronized('mylock') + def foo(self, *args): + ... + + ensures that only one thread will execute the foo method at a time. + + Different methods can share the same lock:: + + @synchronized('mylock') + def foo(self, *args): + ... + + @synchronized('mylock') + def bar(self, *args): + ... + + This way only one of either foo or bar can be executing at a time. + """ + + def wrap(f): + @functools.wraps(f) + def inner(*args, **kwargs): + try: + with lock(name, lock_file_prefix, external, lock_path): + LOG.debug(_('Got semaphore / lock "%(function)s"'), + {'function': f.__name__}) + return f(*args, **kwargs) + finally: + LOG.debug(_('Semaphore / lock released "%(function)s"'), + {'function': f.__name__}) + return inner + return wrap + + +def synchronized_with_prefix(lock_file_prefix): + """Partial object generator for the synchronization decorator. + + Redefine @synchronized in each project like so:: + + (in rack/utils.py) + from rack.openstack.common import lockutils + + synchronized = lockutils.synchronized_with_prefix('rack-') + + + (in rack/foo.py) + from rack import utils + + @utils.synchronized('mylock') + def bar(self, *args): + ... + + The lock_file_prefix argument is used to provide lock files on disk with a + meaningful prefix. + """ + + return functools.partial(synchronized, lock_file_prefix=lock_file_prefix) + + +def main(argv): + """Create a dir for locks and pass it to command from arguments + + If you run this: + python -m openstack.common.lockutils python setup.py testr + + a temporary directory will be created for all your locks and passed to all + your tests in an environment variable. The temporary dir will be deleted + afterwards and the return value will be preserved. 
+ """ + + lock_dir = tempfile.mkdtemp() + os.environ["NOVA_LOCK_PATH"] = lock_dir + try: + ret_val = subprocess.call(argv[1:]) + finally: + shutil.rmtree(lock_dir, ignore_errors=True) + return ret_val + + +if __name__ == '__main__': + sys.exit(main(sys.argv)) diff --git a/rack/openstack/common/log.py b/rack/openstack/common/log.py new file mode 100644 index 0000000..7b72ba7 --- /dev/null +++ b/rack/openstack/common/log.py @@ -0,0 +1,655 @@ +# Copyright 2011 OpenStack Foundation. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Openstack logging handler. + +This module adds to logging functionality by adding the option to specify +a context object when calling the various log methods. If the context object +is not specified, default formatting is used. Additionally, an instance uuid +may be passed as part of the log message, which is intended to make it easier +for admins to find messages related to a specific instance. + +It also allows setting of formatting information through conf. + +""" + +import inspect +import itertools +import logging +import logging.config +import logging.handlers +import os +import re +import sys +import traceback + +from oslo.config import cfg +import six +from six import moves + +from rack.openstack.common.gettextutils import _ +from rack.openstack.common import importutils +from rack.openstack.common import jsonutils +from rack.openstack.common import local + + +_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S" + +_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password'] + +# NOTE(ldbragst): Let's build a list of regex objects using the list of +# _SANITIZE_KEYS we already have. This way, we only have to add the new key +# to the list of _SANITIZE_KEYS and we can generate regular expressions +# for XML and JSON automatically. +_SANITIZE_PATTERNS = [] +_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])', + r'(<%(key)s>).*?()', + r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])', + r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])'] + +for key in _SANITIZE_KEYS: + for pattern in _FORMAT_PATTERNS: + reg_ex = re.compile(pattern % {'key': key}, re.DOTALL) + _SANITIZE_PATTERNS.append(reg_ex) + + +common_cli_opts = [ + cfg.BoolOpt('debug', + short='d', + default=False, + help='Print debugging output (set logging level to ' + 'DEBUG instead of default WARNING level).'), + cfg.BoolOpt('verbose', + short='v', + default=False, + help='Print more verbose output (set logging level to ' + 'INFO instead of default WARNING level).'), +] + +logging_cli_opts = [ + cfg.StrOpt('log-config-append', + metavar='PATH', + deprecated_name='log-config', + help='The name of logging configuration file. It does not ' + 'disable existing loggers, but just appends specified ' + 'logging configuration to any other existing logging ' + 'options. 
Please see the Python logging module ' + 'documentation for details on logging configuration ' + 'files.'), + cfg.StrOpt('log-format', + default=None, + metavar='FORMAT', + help='DEPRECATED. ' + 'A logging.Formatter log message format string which may ' + 'use any of the available logging.LogRecord attributes. ' + 'This option is deprecated. Please use ' + 'logging_context_format_string and ' + 'logging_default_format_string instead.'), + cfg.StrOpt('log-date-format', + default=_DEFAULT_LOG_DATE_FORMAT, + metavar='DATE_FORMAT', + help='Format string for %%(asctime)s in log records. ' + 'Default: %(default)s'), + cfg.StrOpt('log-file', + metavar='PATH', + deprecated_name='logfile', + help='(Optional) Name of log file to output to. ' + 'If no default is set, logging will go to stdout.'), + cfg.StrOpt('log-dir', + deprecated_name='logdir', + help='(Optional) The base directory used for relative ' + '--log-file paths'), + cfg.BoolOpt('use-syslog', + default=False, + help='Use syslog for logging. ' + 'Existing syslog format is DEPRECATED during I, ' + 'and then will be changed in J to honor RFC5424'), + cfg.BoolOpt('use-syslog-rfc-format', + # TODO(bogdando) remove or use True after existing + # syslog format deprecation in J + default=False, + help='(Optional) Use syslog rfc5424 format for logging. ' + 'If enabled, will add APP-NAME (RFC5424) before the ' + 'MSG part of the syslog message. The old format ' + 'without APP-NAME is deprecated in I, ' + 'and will be removed in J.'), + cfg.StrOpt('syslog-log-facility', + default='LOG_USER', + help='syslog facility to receive log lines') +] + +generic_log_opts = [ + cfg.BoolOpt('use_stderr', + default=True, + help='Log output to standard error') +] + +log_opts = [ + cfg.StrOpt('logging_context_format_string', + default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' + '%(name)s [%(request_id)s %(user)s %(tenant)s] ' + '%(instance)s%(message)s', + help='format string to use for log messages with context'), + cfg.StrOpt('logging_default_format_string', + default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' + '%(name)s [-] %(instance)s%(message)s', + help='format string to use for log messages without context'), + cfg.StrOpt('logging_debug_format_suffix', + default='%(funcName)s %(pathname)s:%(lineno)d', + help='data to append to log format when level is DEBUG'), + cfg.StrOpt('logging_exception_prefix', + default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s ' + '%(instance)s', + help='prefix each line of exception output with this format'), + cfg.ListOpt('default_log_levels', + default=[ + 'amqp=WARN', + 'amqplib=WARN', + 'boto=WARN', + 'qpid=WARN', + 'sqlalchemy=WARN', + 'suds=INFO', + 'oslo.messaging=INFO', + 'iso8601=WARN', + ], + help='list of logger=LEVEL pairs'), + cfg.BoolOpt('publish_errors', + default=False, + help='publish error events'), + cfg.BoolOpt('fatal_deprecations', + default=False, + help='make deprecations fatal'), + + # NOTE(mikal): there are two options here because sometimes we are handed + # a full instance (and could include more information), and other times we + # are just handed a UUID for the instance. 
+ cfg.StrOpt('instance_format', + default='[instance: %(uuid)s] ', + help='If an instance is passed with the log message, format ' + 'it like this'), + cfg.StrOpt('instance_uuid_format', + default='[instance: %(uuid)s] ', + help='If an instance UUID is passed with the log message, ' + 'format it like this'), +] + +CONF = cfg.CONF +CONF.register_cli_opts(common_cli_opts) +CONF.register_cli_opts(logging_cli_opts) +CONF.register_opts(generic_log_opts) +CONF.register_opts(log_opts) + +# our new audit level +# NOTE(jkoelker) Since we synthesized an audit level, make the logging +# module aware of it so it acts like other levels. +logging.AUDIT = logging.INFO + 1 +logging.addLevelName(logging.AUDIT, 'AUDIT') + + +try: + NullHandler = logging.NullHandler +except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7 + class NullHandler(logging.Handler): + def handle(self, record): + pass + + def emit(self, record): + pass + + def createLock(self): + self.lock = None + + +def _dictify_context(context): + if context is None: + return None + if not isinstance(context, dict) and getattr(context, 'to_dict', None): + context = context.to_dict() + return context + + +def _get_binary_name(): + return os.path.basename(inspect.stack()[-1][1]) + + +def _get_log_file_path(binary=None): + logfile = CONF.log_file + logdir = CONF.log_dir + + if logfile and not logdir: + return logfile + + if logfile and logdir: + return os.path.join(logdir, logfile) + + if logdir: + binary = binary or _get_binary_name() + return '%s.log' % (os.path.join(logdir, binary),) + + return None + + +def mask_password(message, secret="***"): + """Replace password with 'secret' in message. + + :param message: The string which includes security information. + :param secret: value with which to replace passwords. + :returns: The unicode value of message with the password fields masked. + + For example: + + >>> mask_password("'adminPass' : 'aaaaa'") + "'adminPass' : '***'" + >>> mask_password("'admin_pass' : 'aaaaa'") + "'admin_pass' : '***'" + >>> mask_password('"password" : "aaaaa"') + '"password" : "***"' + >>> mask_password("'original_password' : 'aaaaa'") + "'original_password' : '***'" + >>> mask_password("u'original_password' : u'aaaaa'") + "u'original_password' : u'***'" + """ + message = six.text_type(message) + + # NOTE(ldbragst): Check to see if anything in message contains any key + # specified in _SANITIZE_KEYS, if not then just return the message since + # we don't have to mask any passwords. 
+ if not any(key in message for key in _SANITIZE_KEYS): + return message + + secret = r'\g<1>' + secret + r'\g<2>' + for pattern in _SANITIZE_PATTERNS: + message = re.sub(pattern, secret, message) + return message + + +class BaseLoggerAdapter(logging.LoggerAdapter): + + def audit(self, msg, *args, **kwargs): + self.log(logging.AUDIT, msg, *args, **kwargs) + + +class LazyAdapter(BaseLoggerAdapter): + def __init__(self, name='unknown', version='unknown'): + self._logger = None + self.extra = {} + self.name = name + self.version = version + + @property + def logger(self): + if not self._logger: + self._logger = getLogger(self.name, self.version) + return self._logger + + +class ContextAdapter(BaseLoggerAdapter): + warn = logging.LoggerAdapter.warning + + def __init__(self, logger, project_name, version_string): + self.logger = logger + self.project = project_name + self.version = version_string + + @property + def handlers(self): + return self.logger.handlers + + def deprecated(self, msg, *args, **kwargs): + stdmsg = _("Deprecated: %s") % msg + if CONF.fatal_deprecations: + self.critical(stdmsg, *args, **kwargs) + raise DeprecatedConfig(msg=stdmsg) + else: + self.warn(stdmsg, *args, **kwargs) + + def process(self, msg, kwargs): + # NOTE(mrodden): catch any Message/other object and + # coerce to unicode before they can get + # to the python logging and possibly + # cause string encoding trouble + if not isinstance(msg, six.string_types): + msg = six.text_type(msg) + + if 'extra' not in kwargs: + kwargs['extra'] = {} + extra = kwargs['extra'] + + context = kwargs.pop('context', None) + if not context: + context = getattr(local.store, 'context', None) + if context: + extra.update(_dictify_context(context)) + + instance = kwargs.pop('instance', None) + instance_uuid = (extra.get('instance_uuid', None) or + kwargs.pop('instance_uuid', None)) + instance_extra = '' + if instance: + instance_extra = CONF.instance_format % instance + elif instance_uuid: + instance_extra = (CONF.instance_uuid_format + % {'uuid': instance_uuid}) + extra.update({'instance': instance_extra}) + + extra.update({"project": self.project}) + extra.update({"version": self.version}) + extra['extra'] = extra.copy() + return msg, kwargs + + +class JSONFormatter(logging.Formatter): + def __init__(self, fmt=None, datefmt=None): + # NOTE(jkoelker) we ignore the fmt argument, but its still there + # since logging.config.fileConfig passes it. 
+ self.datefmt = datefmt + + def formatException(self, ei, strip_newlines=True): + lines = traceback.format_exception(*ei) + if strip_newlines: + lines = [moves.filter( + lambda x: x, + line.rstrip().splitlines()) for line in lines] + lines = list(itertools.chain(*lines)) + return lines + + def format(self, record): + message = {'message': record.getMessage(), + 'asctime': self.formatTime(record, self.datefmt), + 'name': record.name, + 'msg': record.msg, + 'args': record.args, + 'levelname': record.levelname, + 'levelno': record.levelno, + 'pathname': record.pathname, + 'filename': record.filename, + 'module': record.module, + 'lineno': record.lineno, + 'funcname': record.funcName, + 'created': record.created, + 'msecs': record.msecs, + 'relative_created': record.relativeCreated, + 'thread': record.thread, + 'thread_name': record.threadName, + 'process_name': record.processName, + 'process': record.process, + 'traceback': None} + + if hasattr(record, 'extra'): + message['extra'] = record.extra + + if record.exc_info: + message['traceback'] = self.formatException(record.exc_info) + + return jsonutils.dumps(message) + + +def _create_logging_excepthook(product_name): + def logging_excepthook(exc_type, value, tb): + extra = {} + if CONF.verbose or CONF.debug: + extra['exc_info'] = (exc_type, value, tb) + getLogger(product_name).critical( + "".join(traceback.format_exception_only(exc_type, value)), + **extra) + return logging_excepthook + + +class LogConfigError(Exception): + + message = _('Error loading logging config %(log_config)s: %(err_msg)s') + + def __init__(self, log_config, err_msg): + self.log_config = log_config + self.err_msg = err_msg + + def __str__(self): + return self.message % dict(log_config=self.log_config, + err_msg=self.err_msg) + + +def _load_log_config(log_config_append): + try: + logging.config.fileConfig(log_config_append, + disable_existing_loggers=False) + except moves.configparser.Error as exc: + raise LogConfigError(log_config_append, str(exc)) + + +def setup(product_name): + """Setup logging.""" + if CONF.log_config_append: + _load_log_config(CONF.log_config_append) + else: + _setup_logging_from_conf() + sys.excepthook = _create_logging_excepthook(product_name) + + +def set_defaults(logging_context_format_string): + cfg.set_defaults(log_opts, + logging_context_format_string= + logging_context_format_string) + + +def _find_facility_from_conf(): + facility_names = logging.handlers.SysLogHandler.facility_names + facility = getattr(logging.handlers.SysLogHandler, + CONF.syslog_log_facility, + None) + + if facility is None and CONF.syslog_log_facility in facility_names: + facility = facility_names.get(CONF.syslog_log_facility) + + if facility is None: + valid_facilities = facility_names.keys() + consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON', + 'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS', + 'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP', + 'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3', + 'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7'] + valid_facilities.extend(consts) + raise TypeError(_('syslog facility must be one of: %s') % + ', '.join("'%s'" % fac + for fac in valid_facilities)) + + return facility + + +class RFCSysLogHandler(logging.handlers.SysLogHandler): + def __init__(self, *args, **kwargs): + self.binary_name = _get_binary_name() + super(RFCSysLogHandler, self).__init__(*args, **kwargs) + + def format(self, record): + msg = super(RFCSysLogHandler, self).format(record) + msg = self.binary_name + ' ' + msg + return 
msg + + +def _setup_logging_from_conf(): + log_root = getLogger(None).logger + for handler in log_root.handlers: + log_root.removeHandler(handler) + + if CONF.use_syslog: + facility = _find_facility_from_conf() + # TODO(bogdando) use the format provided by RFCSysLogHandler + # after existing syslog format deprecation in J + if CONF.use_syslog_rfc_format: + syslog = RFCSysLogHandler(address='/dev/log', + facility=facility) + else: + syslog = logging.handlers.SysLogHandler(address='/dev/log', + facility=facility) + log_root.addHandler(syslog) + + logpath = _get_log_file_path() + if logpath: + filelog = logging.handlers.WatchedFileHandler(logpath) + log_root.addHandler(filelog) + + if CONF.use_stderr: + streamlog = ColorHandler() + log_root.addHandler(streamlog) + + elif not logpath: + # pass sys.stdout as a positional argument + # python2.6 calls the argument strm, in 2.7 it's stream + streamlog = logging.StreamHandler(sys.stdout) + log_root.addHandler(streamlog) + + if CONF.publish_errors: + handler = importutils.import_object( + "rack.openstack.common.log_handler.PublishErrorsHandler", + logging.ERROR) + log_root.addHandler(handler) + + datefmt = CONF.log_date_format + for handler in log_root.handlers: + # NOTE(alaski): CONF.log_format overrides everything currently. This + # should be deprecated in favor of context aware formatting. + if CONF.log_format: + handler.setFormatter(logging.Formatter(fmt=CONF.log_format, + datefmt=datefmt)) + log_root.info('Deprecated: log_format is now deprecated and will ' + 'be removed in the next release') + else: + handler.setFormatter(ContextFormatter(datefmt=datefmt)) + + if CONF.debug: + log_root.setLevel(logging.DEBUG) + elif CONF.verbose: + log_root.setLevel(logging.INFO) + else: + log_root.setLevel(logging.WARNING) + + for pair in CONF.default_log_levels: + mod, _sep, level_name = pair.partition('=') + level = logging.getLevelName(level_name) + logger = logging.getLogger(mod) + logger.setLevel(level) + +_loggers = {} + + +def getLogger(name='unknown', version='unknown'): + if name not in _loggers: + _loggers[name] = ContextAdapter(logging.getLogger(name), + name, + version) + return _loggers[name] + + +def getLazyLogger(name='unknown', version='unknown'): + """Returns lazy logger. + + Creates a pass-through logger that does not create the real logger + until it is really needed and delegates all calls to the real logger + once it is created. + """ + return LazyAdapter(name, version) + + +class WritableLogger(object): + """A thin wrapper that responds to `write` and logs.""" + + def __init__(self, logger, level=logging.INFO): + self.logger = logger + self.level = level + + def write(self, msg): + self.logger.log(self.level, msg.rstrip()) + + +class ContextFormatter(logging.Formatter): + """A context.RequestContext aware formatter configured through flags. + + The flags used to set format strings are: logging_context_format_string + and logging_default_format_string. You can also specify + logging_debug_format_suffix to append extra formatting if the log level is + debug. 
+ + For information about what variables are available for the formatter see: + http://docs.python.org/library/logging.html#formatter + + """ + + def format(self, record): + """Uses contextstring if request_id is set, otherwise default.""" + # NOTE(sdague): default the fancier formatting params + # to an empty string so we don't throw an exception if + # they get used + for key in ('instance', 'color'): + if key not in record.__dict__: + record.__dict__[key] = '' + + if record.__dict__.get('request_id', None): + self._fmt = CONF.logging_context_format_string + else: + self._fmt = CONF.logging_default_format_string + + if (record.levelno == logging.DEBUG and + CONF.logging_debug_format_suffix): + self._fmt += " " + CONF.logging_debug_format_suffix + + # Cache this on the record, Logger will respect our formatted copy + if record.exc_info: + record.exc_text = self.formatException(record.exc_info, record) + return logging.Formatter.format(self, record) + + def formatException(self, exc_info, record=None): + """Format exception output with CONF.logging_exception_prefix.""" + if not record: + return logging.Formatter.formatException(self, exc_info) + + stringbuffer = moves.StringIO() + traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], + None, stringbuffer) + lines = stringbuffer.getvalue().split('\n') + stringbuffer.close() + + if CONF.logging_exception_prefix.find('%(asctime)') != -1: + record.asctime = self.formatTime(record, self.datefmt) + + formatted_lines = [] + for line in lines: + pl = CONF.logging_exception_prefix % record.__dict__ + fl = '%s%s' % (pl, line) + formatted_lines.append(fl) + return '\n'.join(formatted_lines) + + +class ColorHandler(logging.StreamHandler): + LEVEL_COLORS = { + logging.DEBUG: '\033[00;32m', # GREEN + logging.INFO: '\033[00;36m', # CYAN + logging.AUDIT: '\033[01;36m', # BOLD CYAN + logging.WARN: '\033[01;33m', # BOLD YELLOW + logging.ERROR: '\033[01;31m', # BOLD RED + logging.CRITICAL: '\033[01;31m', # BOLD RED + } + + def format(self, record): + record.color = self.LEVEL_COLORS[record.levelno] + return logging.StreamHandler.format(self, record) + + +class DeprecatedConfig(Exception): + message = _("Fatal call to deprecated config: %(msg)s") + + def __init__(self, msg): + super(Exception, self).__init__(self.message % dict(msg=msg)) diff --git a/rack/openstack/common/loopingcall.py b/rack/openstack/common/loopingcall.py new file mode 100644 index 0000000..b80633a --- /dev/null +++ b/rack/openstack/common/loopingcall.py @@ -0,0 +1,147 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
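Stepping back to the logging module that ends just above: a minimal sketch of how a service might wire it up. This is not part of the patch; the '--verbose' flag and the 'rack' product name are only illustrative, and real services pass their own argv.

    from oslo.config import cfg

    from rack.openstack.common import log as logging

    CONF = cfg.CONF
    LOG = logging.getLogger(__name__)

    if __name__ == '__main__':
        # Parse the CLI options registered above; --verbose drops the root
        # level from WARNING to INFO, then setup() installs the handlers and
        # the context-aware formatter on the root logger.
        CONF(['--verbose'], project='rack')
        logging.setup('rack')

        LOG.audit('audit level added by this module')   # emitted at INFO + 1
        LOG.info('service started')
        LOG.debug('needs --debug / CONF.debug to be emitted')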
+ +import sys + +from eventlet import event +from eventlet import greenthread + +from rack.openstack.common.gettextutils import _ +from rack.openstack.common import log as logging +from rack.openstack.common import timeutils + +LOG = logging.getLogger(__name__) + + +class LoopingCallDone(Exception): + """Exception to break out and stop a LoopingCall. + + The poll-function passed to LoopingCall can raise this exception to + break out of the loop normally. This is somewhat analogous to + StopIteration. + + An optional return-value can be included as the argument to the exception; + this return-value will be returned by LoopingCall.wait() + + """ + + def __init__(self, retvalue=True): + """:param retvalue: Value that LoopingCall.wait() should return.""" + self.retvalue = retvalue + + +class LoopingCallBase(object): + def __init__(self, f=None, *args, **kw): + self.args = args + self.kw = kw + self.f = f + self._running = False + self.done = None + + def stop(self): + self._running = False + + def wait(self): + return self.done.wait() + + +class FixedIntervalLoopingCall(LoopingCallBase): + """A fixed interval looping call.""" + + def start(self, interval, initial_delay=None): + self._running = True + done = event.Event() + + def _inner(): + if initial_delay: + greenthread.sleep(initial_delay) + + try: + while self._running: + start = timeutils.utcnow() + self.f(*self.args, **self.kw) + end = timeutils.utcnow() + if not self._running: + break + delay = interval - timeutils.delta_seconds(start, end) + if delay <= 0: + LOG.warn(_('task run outlasted interval by %s sec') % + -delay) + greenthread.sleep(delay if delay > 0 else 0) + except LoopingCallDone as e: + self.stop() + done.send(e.retvalue) + except Exception: + LOG.exception(_('in fixed duration looping call')) + done.send_exception(*sys.exc_info()) + return + else: + done.send(True) + + self.done = done + + greenthread.spawn_n(_inner) + return self.done + + +# TODO(mikal): this class name is deprecated in Havana and should be removed +# in the I release +LoopingCall = FixedIntervalLoopingCall + + +class DynamicLoopingCall(LoopingCallBase): + """A looping call which sleeps until the next known event. + + The function called should return how long to sleep for before being + called again. + """ + + def start(self, initial_delay=None, periodic_interval_max=None): + self._running = True + done = event.Event() + + def _inner(): + if initial_delay: + greenthread.sleep(initial_delay) + + try: + while self._running: + idle = self.f(*self.args, **self.kw) + if not self._running: + break + + if periodic_interval_max is not None: + idle = min(idle, periodic_interval_max) + LOG.debug(_('Dynamic looping call sleeping for %.02f ' + 'seconds'), idle) + greenthread.sleep(idle) + except LoopingCallDone as e: + self.stop() + done.send(e.retvalue) + except Exception: + LOG.exception(_('in dynamic looping call')) + done.send_exception(*sys.exc_info()) + return + else: + done.send(True) + + self.done = done + + greenthread.spawn(_inner) + return self.done diff --git a/rack/openstack/common/memorycache.py b/rack/openstack/common/memorycache.py new file mode 100644 index 0000000..843573b --- /dev/null +++ b/rack/openstack/common/memorycache.py @@ -0,0 +1,97 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Super simple fake memcache client.""" + +from oslo.config import cfg + +from rack.openstack.common import timeutils + +memcache_opts = [ + cfg.ListOpt('memcached_servers', + default=None, + help='Memcached servers or None for in process cache.'), +] + +CONF = cfg.CONF +CONF.register_opts(memcache_opts) + + +def get_client(memcached_servers=None): + client_cls = Client + + if not memcached_servers: + memcached_servers = CONF.memcached_servers + if memcached_servers: + try: + import memcache + client_cls = memcache.Client + except ImportError: + pass + + return client_cls(memcached_servers, debug=0) + + +class Client(object): + """Replicates a tiny subset of memcached client interface.""" + + def __init__(self, *args, **kwargs): + """Ignores the passed in args.""" + self.cache = {} + + def get(self, key): + """Retrieves the value for a key or None. + + This expunges expired keys during each get. + """ + + now = timeutils.utcnow_ts() + for k in self.cache.keys(): + (timeout, _value) = self.cache[k] + if timeout and now >= timeout: + del self.cache[k] + + return self.cache.get(key, (0, None))[1] + + def set(self, key, value, time=0, min_compress_len=0): + """Sets the value for a key.""" + timeout = 0 + if time != 0: + timeout = timeutils.utcnow_ts() + time + self.cache[key] = (timeout, value) + return True + + def add(self, key, value, time=0, min_compress_len=0): + """Sets the value for a key if it doesn't exist.""" + if self.get(key) is not None: + return False + return self.set(key, value, time, min_compress_len) + + def incr(self, key, delta=1): + """Increments the value for a key.""" + value = self.get(key) + if value is None: + return None + new_value = int(value) + delta + self.cache[key] = (self.cache[key][0], str(new_value)) + return new_value + + def delete(self, key, time=0): + """Deletes the value associated with a key.""" + if key in self.cache: + del self.cache[key] diff --git a/rack/openstack/common/middleware/__init__.py b/rack/openstack/common/middleware/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/rack/openstack/common/middleware/base.py b/rack/openstack/common/middleware/base.py new file mode 100644 index 0000000..2099549 --- /dev/null +++ b/rack/openstack/common/middleware/base.py @@ -0,0 +1,55 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
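A quick sketch of the fake cache above (not part of the patch): get_client() falls back to this Client when no memcached_servers are configured; the keys and TTL below are arbitrary.

    from rack.openstack.common import memorycache

    cache = memorycache.Client()
    cache.set('token-123', 'alice', time=300)   # expires 300 seconds from now
    print(cache.get('token-123'))               # 'alice' (None once expired)
    print(cache.add('token-123', 'bob'))        # False: add() will not overwrite
    print(cache.incr('hits'))                   # None: incr() on a missing key
    cache.delete('token-123')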
+"""Base class(es) for WSGI Middleware.""" + +import webob.dec + + +class Middleware(object): + """Base WSGI middleware wrapper. + + These classes require an application to be initialized that will be called + next. By default the middleware will simply call its wrapped app, or you + can override __call__ to customize its behavior. + """ + + @classmethod + def factory(cls, global_conf, **local_conf): + """Factory method for paste.deploy.""" + return cls + + def __init__(self, application): + self.application = application + + def process_request(self, req): + """Called on each request. + + If this returns None, the next application down the stack will be + executed. If it returns a response then that response will be returned + and execution will stop here. + """ + return None + + def process_response(self, response): + """Do whatever you'd like to the response.""" + return response + + @webob.dec.wsgify + def __call__(self, req): + response = self.process_request(req) + if response: + return response + response = req.get_response(self.application) + return self.process_response(response) diff --git a/rack/openstack/common/middleware/request_id.py b/rack/openstack/common/middleware/request_id.py new file mode 100644 index 0000000..b84324f --- /dev/null +++ b/rack/openstack/common/middleware/request_id.py @@ -0,0 +1,38 @@ +# Copyright (c) 2013 NEC Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Middleware that ensures request ID. + +It ensures to assign request ID for each API request and set it to +request environment. The request ID is also added to API response. +""" + +from rack.openstack.common import context +from rack.openstack.common.middleware import base + + +ENV_REQUEST_ID = 'openstack.request_id' +HTTP_RESP_HEADER_REQUEST_ID = 'x-openstack-request-id' + + +class RequestIdMiddleware(base.Middleware): + + def process_request(self, req): + self.req_id = context.generate_request_id() + req.environ[ENV_REQUEST_ID] = self.req_id + + def process_response(self, response): + response.headers.add(HTTP_RESP_HEADER_REQUEST_ID, self.req_id) + return response diff --git a/rack/openstack/common/network_utils.py b/rack/openstack/common/network_utils.py new file mode 100644 index 0000000..dbed1ce --- /dev/null +++ b/rack/openstack/common/network_utils.py @@ -0,0 +1,81 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Network-related utilities and helper functions. +""" + +import urlparse + + +def parse_host_port(address, default_port=None): + """Interpret a string as a host:port pair. + + An IPv6 address MUST be escaped if accompanied by a port, + because otherwise ambiguity ensues: 2001:db8:85a3::8a2e:370:7334 + means both [2001:db8:85a3::8a2e:370:7334] and + [2001:db8:85a3::8a2e:370]:7334. + + >>> parse_host_port('server01:80') + ('server01', 80) + >>> parse_host_port('server01') + ('server01', None) + >>> parse_host_port('server01', default_port=1234) + ('server01', 1234) + >>> parse_host_port('[::1]:80') + ('::1', 80) + >>> parse_host_port('[::1]') + ('::1', None) + >>> parse_host_port('[::1]', default_port=1234) + ('::1', 1234) + >>> parse_host_port('2001:db8:85a3::8a2e:370:7334', default_port=1234) + ('2001:db8:85a3::8a2e:370:7334', 1234) + + """ + if address[0] == '[': + # Escaped ipv6 + _host, _port = address[1:].split(']') + host = _host + if ':' in _port: + port = _port.split(':')[1] + else: + port = default_port + else: + if address.count(':') == 1: + host, port = address.split(':') + else: + # 0 means ipv4, >1 means ipv6. + # We prohibit unescaped ipv6 addresses with port. + host = address + port = default_port + + return (host, None if port is None else int(port)) + + +def urlsplit(url, scheme='', allow_fragments=True): + """Parse a URL using urlparse.urlsplit(), splitting query and fragments. + This function papers over Python issue9374 when needed. + + The parameters are the same as urlparse.urlsplit. + """ + scheme, netloc, path, query, fragment = urlparse.urlsplit( + url, scheme, allow_fragments) + if allow_fragments and '#' in path: + path, fragment = path.split('#', 1) + if '?' in path: + path, query = path.split('?', 1) + return urlparse.SplitResult(scheme, netloc, path, query, fragment) diff --git a/rack/openstack/common/periodic_task.py b/rack/openstack/common/periodic_task.py new file mode 100644 index 0000000..37c8cd9 --- /dev/null +++ b/rack/openstack/common/periodic_task.py @@ -0,0 +1,190 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import datetime +import time + +from oslo.config import cfg +import six + +from rack.openstack.common.gettextutils import _ # noqa +from rack.openstack.common import log as logging +from rack.openstack.common import timeutils + + +periodic_opts = [ + cfg.BoolOpt('run_external_periodic_tasks', + default=True, + help=('Some periodic tasks can be run in a separate process. ' + 'Should we run them here?')), +] + +CONF = cfg.CONF +CONF.register_opts(periodic_opts) + +LOG = logging.getLogger(__name__) + +DEFAULT_INTERVAL = 60.0 + + +class InvalidPeriodicTaskArg(Exception): + message = _("Unexpected argument for periodic task creation: %(arg)s.") + + +def periodic_task(*args, **kwargs): + """Decorator to indicate that a method is a periodic task. + + This decorator can be used in two ways: + + 1. Without arguments '@periodic_task', this will be run on every cycle + of the periodic scheduler. 
+ + 2. With arguments: + @periodic_task(spacing=N [, run_immediately=[True|False]]) + this will be run on approximately every N seconds. If this number is + negative the periodic task will be disabled. If the run_immediately + argument is provided and has a value of 'True', the first run of the + task will be shortly after task scheduler starts. If + run_immediately is omitted or set to 'False', the first time the + task runs will be approximately N seconds after the task scheduler + starts. + """ + def decorator(f): + # Test for old style invocation + if 'ticks_between_runs' in kwargs: + raise InvalidPeriodicTaskArg(arg='ticks_between_runs') + + # Control if run at all + f._periodic_task = True + f._periodic_external_ok = kwargs.pop('external_process_ok', False) + if f._periodic_external_ok and not CONF.run_external_periodic_tasks: + f._periodic_enabled = False + else: + f._periodic_enabled = kwargs.pop('enabled', True) + + # Control frequency + f._periodic_spacing = kwargs.pop('spacing', 0) + f._periodic_immediate = kwargs.pop('run_immediately', False) + if f._periodic_immediate: + f._periodic_last_run = None + else: + f._periodic_last_run = timeutils.utcnow() + return f + + # NOTE(sirp): The `if` is necessary to allow the decorator to be used with + # and without parens. + # + # In the 'with-parens' case (with kwargs present), this function needs to + # return a decorator function since the interpreter will invoke it like: + # + # periodic_task(*args, **kwargs)(f) + # + # In the 'without-parens' case, the original function will be passed + # in as the first argument, like: + # + # periodic_task(f) + if kwargs: + return decorator + else: + return decorator(args[0]) + + +class _PeriodicTasksMeta(type): + def __init__(cls, names, bases, dict_): + """Metaclass that allows us to collect decorated periodic tasks.""" + super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_) + + # NOTE(sirp): if the attribute is not present then we must be the base + # class, so, go ahead an initialize it. If the attribute is present, + # then we're a subclass so make a copy of it so we don't step on our + # parent's toes. 
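As an aside, a usage sketch of the decorator described above; the PeriodicTasks base class it pairs with is defined a little further down, and the manager class and 600-second interval are illustrative only.

    from rack.openstack.common import periodic_task


    class ExampleManager(periodic_task.PeriodicTasks):

        @periodic_task.periodic_task
        def _every_cycle(self, context):
            # Bare decorator: runs on every pass of the periodic scheduler.
            pass

        @periodic_task.periodic_task(spacing=600, run_immediately=True)
        def _every_ten_minutes(self, context):
            # spacing=600 -> roughly every 600 seconds, starting right away.
            pass

    # A service loop calls this repeatedly; the return value says how long
    # the caller can idle before the next task is due.
    # idle_for = ExampleManager().run_periodic_tasks(context=None)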
+ try: + cls._periodic_tasks = cls._periodic_tasks[:] + except AttributeError: + cls._periodic_tasks = [] + + try: + cls._periodic_last_run = cls._periodic_last_run.copy() + except AttributeError: + cls._periodic_last_run = {} + + try: + cls._periodic_spacing = cls._periodic_spacing.copy() + except AttributeError: + cls._periodic_spacing = {} + + for value in cls.__dict__.values(): + if getattr(value, '_periodic_task', False): + task = value + name = task.__name__ + + if task._periodic_spacing < 0: + LOG.info(_('Skipping periodic task %(task)s because ' + 'its interval is negative'), + {'task': name}) + continue + if not task._periodic_enabled: + LOG.info(_('Skipping periodic task %(task)s because ' + 'it is disabled'), + {'task': name}) + continue + + # A periodic spacing of zero indicates that this task should + # be run every pass + if task._periodic_spacing == 0: + task._periodic_spacing = None + + cls._periodic_tasks.append((name, task)) + cls._periodic_spacing[name] = task._periodic_spacing + cls._periodic_last_run[name] = task._periodic_last_run + + +@six.add_metaclass(_PeriodicTasksMeta) +class PeriodicTasks(object): + + def run_periodic_tasks(self, context, raise_on_error=False): + """Tasks to be run at a periodic interval.""" + idle_for = DEFAULT_INTERVAL + for task_name, task in self._periodic_tasks: + full_task_name = '.'.join([self.__class__.__name__, task_name]) + + now = timeutils.utcnow() + spacing = self._periodic_spacing[task_name] + last_run = self._periodic_last_run[task_name] + + # If a periodic task is _nearly_ due, then we'll run it early + if spacing is not None and last_run is not None: + due = last_run + datetime.timedelta(seconds=spacing) + if not timeutils.is_soon(due, 0.2): + idle_for = min(idle_for, timeutils.delta_seconds(now, due)) + continue + + if spacing is not None: + idle_for = min(idle_for, spacing) + + LOG.debug(_("Running periodic task %(full_task_name)s"), + {"full_task_name": full_task_name}) + self._periodic_last_run[task_name] = timeutils.utcnow() + + try: + task(self, context) + except Exception as e: + if raise_on_error: + raise + LOG.exception(_("Error during %(full_task_name)s: %(e)s"), + {"full_task_name": full_task_name, "e": e}) + time.sleep(0) + + return idle_for diff --git a/rack/openstack/common/policy.py b/rack/openstack/common/policy.py new file mode 100644 index 0000000..aa016b5 --- /dev/null +++ b/rack/openstack/common/policy.py @@ -0,0 +1,779 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Common Policy Engine Implementation + +Policies can be expressed in one of two forms: A list of lists, or a +string written in the new policy language. + +In the list-of-lists representation, each check inside the innermost +list is combined as with an "and" conjunction--for that check to pass, +all the specified checks must pass. These innermost lists are then +combined as with an "or" conjunction. 
This is the original way of +expressing policies, but there now exists a new way: the policy +language. + +In the policy language, each check is specified the same way as in the +list-of-lists representation: a simple "a:b" pair that is matched to +the correct code to perform that check. However, conjunction +operators are available, allowing for more expressiveness in crafting +policies. + +As an example, take the following rule, expressed in the list-of-lists +representation:: + + [["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]] + +In the policy language, this becomes:: + + role:admin or (project_id:%(project_id)s and role:projectadmin) + +The policy language also has the "not" operator, allowing a richer +policy rule:: + + project_id:%(project_id)s and not role:dunce + +Finally, two special policy checks should be mentioned; the policy +check "@" will always accept an access, and the policy check "!" will +always reject an access. (Note that if a rule is either the empty +list ("[]") or the empty string, this is equivalent to the "@" policy +check.) Of these, the "!" policy check is probably the most useful, +as it allows particular rules to be explicitly disabled. +""" + +import abc +import re +import urllib + +import urllib2 + +from rack.openstack.common.gettextutils import _ +from rack.openstack.common import jsonutils +from rack.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +_rules = None +_checks = {} + + +class Rules(dict): + """ + A store for rules. Handles the default_rule setting directly. + """ + + @classmethod + def load_json(cls, data, default_rule=None): + """ + Allow loading of JSON rule data. + """ + + # Suck in the JSON data and parse the rules + rules = dict((k, parse_rule(v)) for k, v in + jsonutils.loads(data).items()) + + return cls(rules, default_rule) + + def __init__(self, rules=None, default_rule=None): + """Initialize the Rules store.""" + + super(Rules, self).__init__(rules or {}) + self.default_rule = default_rule + + def __missing__(self, key): + """Implements the default rule handling.""" + + # If the default rule isn't actually defined, do something + # reasonably intelligent + if not self.default_rule or self.default_rule not in self: + raise KeyError(key) + + return self[self.default_rule] + + def __str__(self): + """Dumps a string representation of the rules.""" + + # Start by building the canonical strings for the rules + out_rules = {} + for key, value in self.items(): + # Use empty string for singleton TrueCheck instances + if isinstance(value, TrueCheck): + out_rules[key] = '' + else: + out_rules[key] = str(value) + + # Dump a pretty-printed JSON representation + return jsonutils.dumps(out_rules, indent=4) + + +# Really have to figure out a way to deprecate this +def set_rules(rules): + """Set the rules in use for policy checks.""" + + global _rules + + _rules = rules + + +# Ditto +def reset(): + """Clear the rules used for policy checks.""" + + global _rules + + _rules = None + + +def check(rule, target, creds, exc=None, *args, **kwargs): + """ + Checks authorization of a rule against the target and credentials. + + :param rule: The rule to evaluate. + :param target: As much information about the object being operated + on as possible, as a dictionary. + :param creds: As much information about the user performing the + action as possible, as a dictionary. + :param exc: Class of the exception to raise if the check fails. 
+ Any remaining arguments passed to check() (both + positional and keyword arguments) will be passed to + the exception class. If exc is not provided, returns + False. + + :return: Returns False if the policy does not allow the action and + exc is not provided; otherwise, returns a value that + evaluates to True. Note: for rules using the "case" + expression, this True value will be the specified string + from the expression. + """ + + # Allow the rule to be a Check tree + if isinstance(rule, BaseCheck): + result = rule(target, creds) + elif not _rules: + # No rules to reference means we're going to fail closed + result = False + else: + try: + # Evaluate the rule + result = _rules[rule](target, creds) + except KeyError: + # If the rule doesn't exist, fail closed + result = False + + # If it is False, raise the exception if requested + if exc and result is False: + raise exc(*args, **kwargs) + + return result + + +class BaseCheck(object): + """ + Abstract base class for Check classes. + """ + + __metaclass__ = abc.ABCMeta + + @abc.abstractmethod + def __str__(self): + """ + Retrieve a string representation of the Check tree rooted at + this node. + """ + + pass + + @abc.abstractmethod + def __call__(self, target, cred): + """ + Perform the check. Returns False to reject the access or a + true value (not necessary True) to accept the access. + """ + + pass + + +class FalseCheck(BaseCheck): + """ + A policy check that always returns False (disallow). + """ + + def __str__(self): + """Return a string representation of this check.""" + + return "!" + + def __call__(self, target, cred): + """Check the policy.""" + + return False + + +class TrueCheck(BaseCheck): + """ + A policy check that always returns True (allow). + """ + + def __str__(self): + """Return a string representation of this check.""" + + return "@" + + def __call__(self, target, cred): + """Check the policy.""" + + return True + + +class Check(BaseCheck): + """ + A base class to allow for user-defined policy checks. + """ + + def __init__(self, kind, match): + """ + :param kind: The kind of the check, i.e., the field before the + ':'. + :param match: The match of the check, i.e., the field after + the ':'. + """ + + self.kind = kind + self.match = match + + def __str__(self): + """Return a string representation of this check.""" + + return "%s:%s" % (self.kind, self.match) + + +class NotCheck(BaseCheck): + """ + A policy check that inverts the result of another policy check. + Implements the "not" operator. + """ + + def __init__(self, rule): + """ + Initialize the 'not' check. + + :param rule: The rule to negate. Must be a Check. + """ + + self.rule = rule + + def __str__(self): + """Return a string representation of this check.""" + + return "not %s" % self.rule + + def __call__(self, target, cred): + """ + Check the policy. Returns the logical inverse of the wrapped + check. + """ + + return not self.rule(target, cred) + + +class AndCheck(BaseCheck): + """ + A policy check that requires that a list of other checks all + return True. Implements the "and" operator. + """ + + def __init__(self, rules): + """ + Initialize the 'and' check. + + :param rules: A list of rules that will be tested. + """ + + self.rules = rules + + def __str__(self): + """Return a string representation of this check.""" + + return "(%s)" % ' and '.join(str(r) for r in self.rules) + + def __call__(self, target, cred): + """ + Check the policy. Requires that all rules accept in order to + return True. 
+ """ + + for rule in self.rules: + if not rule(target, cred): + return False + + return True + + def add_check(self, rule): + """ + Allows addition of another rule to the list of rules that will + be tested. Returns the AndCheck object for convenience. + """ + + self.rules.append(rule) + return self + + +class OrCheck(BaseCheck): + """ + A policy check that requires that at least one of a list of other + checks returns True. Implements the "or" operator. + """ + + def __init__(self, rules): + """ + Initialize the 'or' check. + + :param rules: A list of rules that will be tested. + """ + + self.rules = rules + + def __str__(self): + """Return a string representation of this check.""" + + return "(%s)" % ' or '.join(str(r) for r in self.rules) + + def __call__(self, target, cred): + """ + Check the policy. Requires that at least one rule accept in + order to return True. + """ + + for rule in self.rules: + if rule(target, cred): + return True + + return False + + def add_check(self, rule): + """ + Allows addition of another rule to the list of rules that will + be tested. Returns the OrCheck object for convenience. + """ + + self.rules.append(rule) + return self + + +def _parse_check(rule): + """ + Parse a single base check rule into an appropriate Check object. + """ + + # Handle the special checks + if rule == '!': + return FalseCheck() + elif rule == '@': + return TrueCheck() + + try: + kind, match = rule.split(':', 1) + except Exception: + LOG.exception(_("Failed to understand rule %(rule)s") % locals()) + # If the rule is invalid, we'll fail closed + return FalseCheck() + + # Find what implements the check + if kind in _checks: + return _checks[kind](kind, match) + elif None in _checks: + return _checks[None](kind, match) + else: + LOG.error(_("No handler for matches of kind %s") % kind) + return FalseCheck() + + +def _parse_list_rule(rule): + """ + Provided for backwards compatibility. Translates the old + list-of-lists syntax into a tree of Check objects. + """ + + # Empty rule defaults to True + if not rule: + return TrueCheck() + + # Outer list is joined by "or"; inner list by "and" + or_list = [] + for inner_rule in rule: + # Elide empty inner lists + if not inner_rule: + continue + + # Handle bare strings + if isinstance(inner_rule, basestring): + inner_rule = [inner_rule] + + # Parse the inner rules into Check objects + and_list = [_parse_check(r) for r in inner_rule] + + # Append the appropriate check to the or_list + if len(and_list) == 1: + or_list.append(and_list[0]) + else: + or_list.append(AndCheck(and_list)) + + # If we have only one check, omit the "or" + if len(or_list) == 0: + return FalseCheck() + elif len(or_list) == 1: + return or_list[0] + + return OrCheck(or_list) + + +# Used for tokenizing the policy language +_tokenize_re = re.compile(r'\s+') + + +def _parse_tokenize(rule): + """ + Tokenizer for the policy language. + + Most of the single-character tokens are specified in the + _tokenize_re; however, parentheses need to be handled specially, + because they can appear inside a check string. Thankfully, those + parentheses that appear inside a check string can never occur at + the very beginning or end ("%(variable)s" is the correct syntax). 
+ """ + + for tok in _tokenize_re.split(rule): + # Skip empty tokens + if not tok or tok.isspace(): + continue + + # Handle leading parens on the token + clean = tok.lstrip('(') + for i in range(len(tok) - len(clean)): + yield '(', '(' + + # If it was only parentheses, continue + if not clean: + continue + else: + tok = clean + + # Handle trailing parens on the token + clean = tok.rstrip(')') + trail = len(tok) - len(clean) + + # Yield the cleaned token + lowered = clean.lower() + if lowered in ('and', 'or', 'not'): + # Special tokens + yield lowered, clean + elif clean: + # Not a special token, but not composed solely of ')' + if len(tok) >= 2 and ((tok[0], tok[-1]) in + [('"', '"'), ("'", "'")]): + # It's a quoted string + yield 'string', tok[1:-1] + else: + yield 'check', _parse_check(clean) + + # Yield the trailing parens + for i in range(trail): + yield ')', ')' + + +class ParseStateMeta(type): + """ + Metaclass for the ParseState class. Facilitates identifying + reduction methods. + """ + + def __new__(mcs, name, bases, cls_dict): + """ + Create the class. Injects the 'reducers' list, a list of + tuples matching token sequences to the names of the + corresponding reduction methods. + """ + + reducers = [] + + for key, value in cls_dict.items(): + if not hasattr(value, 'reducers'): + continue + for reduction in value.reducers: + reducers.append((reduction, key)) + + cls_dict['reducers'] = reducers + + return super(ParseStateMeta, mcs).__new__(mcs, name, bases, cls_dict) + + +def reducer(*tokens): + """ + Decorator for reduction methods. Arguments are a sequence of + tokens, in order, which should trigger running this reduction + method. + """ + + def decorator(func): + # Make sure we have a list of reducer sequences + if not hasattr(func, 'reducers'): + func.reducers = [] + + # Add the tokens to the list of reducer sequences + func.reducers.append(list(tokens)) + + return func + + return decorator + + +class ParseState(object): + """ + Implement the core of parsing the policy language. Uses a greedy + reduction algorithm to reduce a sequence of tokens into a single + terminal, the value of which will be the root of the Check tree. + + Note: error reporting is rather lacking. The best we can get with + this parser formulation is an overall "parse failed" error. + Fortunately, the policy language is simple enough that this + shouldn't be that big a problem. + """ + + __metaclass__ = ParseStateMeta + + def __init__(self): + """Initialize the ParseState.""" + + self.tokens = [] + self.values = [] + + def reduce(self): + """ + Perform a greedy reduction of the token stream. If a reducer + method matches, it will be executed, then the reduce() method + will be called recursively to search for any more possible + reductions. + """ + + for reduction, methname in self.reducers: + if (len(self.tokens) >= len(reduction) and + self.tokens[-len(reduction):] == reduction): + # Get the reduction method + meth = getattr(self, methname) + + # Reduce the token stream + results = meth(*self.values[-len(reduction):]) + + # Update the tokens and values + self.tokens[-len(reduction):] = [r[0] for r in results] + self.values[-len(reduction):] = [r[1] for r in results] + + # Check for any more reductions + return self.reduce() + + def shift(self, tok, value): + """Adds one more token to the state. Calls reduce().""" + + self.tokens.append(tok) + self.values.append(value) + + # Do a greedy reduce... + self.reduce() + + @property + def result(self): + """ + Obtain the final result of the parse. 
Raises ValueError if + the parse failed to reduce to a single result. + """ + + if len(self.values) != 1: + raise ValueError("Could not parse rule") + return self.values[0] + + @reducer('(', 'check', ')') + @reducer('(', 'and_expr', ')') + @reducer('(', 'or_expr', ')') + def _wrap_check(self, _p1, check, _p2): + """Turn parenthesized expressions into a 'check' token.""" + + return [('check', check)] + + @reducer('check', 'and', 'check') + def _make_and_expr(self, check1, _and, check2): + """ + Create an 'and_expr' from two checks joined by the 'and' + operator. + """ + + return [('and_expr', AndCheck([check1, check2]))] + + @reducer('and_expr', 'and', 'check') + def _extend_and_expr(self, and_expr, _and, check): + """ + Extend an 'and_expr' by adding one more check. + """ + + return [('and_expr', and_expr.add_check(check))] + + @reducer('check', 'or', 'check') + def _make_or_expr(self, check1, _or, check2): + """ + Create an 'or_expr' from two checks joined by the 'or' + operator. + """ + + return [('or_expr', OrCheck([check1, check2]))] + + @reducer('or_expr', 'or', 'check') + def _extend_or_expr(self, or_expr, _or, check): + """ + Extend an 'or_expr' by adding one more check. + """ + + return [('or_expr', or_expr.add_check(check))] + + @reducer('not', 'check') + def _make_not_expr(self, _not, check): + """Invert the result of another check.""" + + return [('check', NotCheck(check))] + + +def _parse_text_rule(rule): + """ + Translates a policy written in the policy language into a tree of + Check objects. + """ + + # Empty rule means always accept + if not rule: + return TrueCheck() + + # Parse the token stream + state = ParseState() + for tok, value in _parse_tokenize(rule): + state.shift(tok, value) + + try: + return state.result + except ValueError: + # Couldn't parse the rule + LOG.exception(_("Failed to understand rule %(rule)r") % locals()) + + # Fail closed + return FalseCheck() + + +def parse_rule(rule): + """ + Parses a policy rule into a tree of Check objects. + """ + + # If the rule is a string, it's in the policy language + if isinstance(rule, basestring): + return _parse_text_rule(rule) + return _parse_list_rule(rule) + + +def register(name, func=None): + """ + Register a function or Check class as a policy check. + + :param name: Gives the name of the check type, e.g., 'rule', + 'role', etc. If name is None, a default check type + will be registered. + :param func: If given, provides the function or class to register. + If not given, returns a function taking one argument + to specify the function or class to register, + allowing use as a decorator. + """ + + # Perform the actual decoration by registering the function or + # class. Returns the function or class for compliance with the + # decorator interface. + def decorator(func): + _checks[name] = func + return func + + # If the function or class is given, do the registration + if func: + return decorator(func) + + return decorator + + +@register("rule") +class RuleCheck(Check): + def __call__(self, target, creds): + """ + Recursively checks credentials based on the defined rules. 
+ """ + + try: + return _rules[self.match](target, creds) + except KeyError: + # We don't have any matching rule; fail closed + return False + + +@register("role") +class RoleCheck(Check): + def __call__(self, target, creds): + """Check that there is a matching role in the cred dict.""" + + return self.match.lower() in [x.lower() for x in creds['roles']] + + +@register('http') +class HttpCheck(Check): + def __call__(self, target, creds): + """ + Check http: rules by calling to a remote server. + + This example implementation simply verifies that the response + is exactly 'True'. + """ + + url = ('http:' + self.match) % target + data = {'target': jsonutils.dumps(target), + 'credentials': jsonutils.dumps(creds)} + post_data = urllib.urlencode(data) + f = urllib2.urlopen(url, post_data) + return f.read() == "True" + + +@register(None) +class GenericCheck(Check): + def __call__(self, target, creds): + """ + Check an individual match. + + Matches look like: + + tenant:%(tenant_id)s + role:compute:admin + """ + + # TODO(termie): do dict inspection via dot syntax + match = self.match % target + if self.kind in creds: + return match == unicode(creds[self.kind]) + return False diff --git a/rack/openstack/common/processutils.py b/rack/openstack/common/processutils.py new file mode 100644 index 0000000..17475b2 --- /dev/null +++ b/rack/openstack/common/processutils.py @@ -0,0 +1,266 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +System-level utilities and helper functions. 
+""" + +import errno +import logging as stdlib_logging +import os +import random +import shlex +import signal + +from eventlet.green import subprocess +from eventlet import greenthread +import six + +from rack.openstack.common.gettextutils import _ +from rack.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + + +class InvalidArgumentError(Exception): + def __init__(self, message=None): + super(InvalidArgumentError, self).__init__(message) + + +class UnknownArgumentError(Exception): + def __init__(self, message=None): + super(UnknownArgumentError, self).__init__(message) + + +class ProcessExecutionError(Exception): + def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, + description=None): + self.exit_code = exit_code + self.stderr = stderr + self.stdout = stdout + self.cmd = cmd + self.description = description + + if description is None: + description = _("Unexpected error while running command.") + if exit_code is None: + exit_code = '-' + message = _('%(description)s\n' + 'Command: %(cmd)s\n' + 'Exit code: %(exit_code)s\n' + 'Stdout: %(stdout)r\n' + 'Stderr: %(stderr)r') % {'description': description, + 'cmd': cmd, + 'exit_code': exit_code, + 'stdout': stdout, + 'stderr': stderr} + super(ProcessExecutionError, self).__init__(message) + + +class NoRootWrapSpecified(Exception): + def __init__(self, message=None): + super(NoRootWrapSpecified, self).__init__(message) + + +def _subprocess_setup(): + # Python installs a SIGPIPE handler by default. This is usually not what + # non-Python subprocesses expect. + signal.signal(signal.SIGPIPE, signal.SIG_DFL) + + +def execute(*cmd, **kwargs): + """Helper method to shell out and execute a command through subprocess. + + Allows optional retry. + + :param cmd: Passed to subprocess.Popen. + :type cmd: string + :param process_input: Send to opened process. + :type process_input: string + :param check_exit_code: Single bool, int, or list of allowed exit + codes. Defaults to [0]. Raise + :class:`ProcessExecutionError` unless + program exits with one of these code. + :type check_exit_code: boolean, int, or [int] + :param delay_on_retry: True | False. Defaults to True. If set to True, + wait a short amount of time before retrying. + :type delay_on_retry: boolean + :param attempts: How many times to retry cmd. + :type attempts: int + :param run_as_root: True | False. Defaults to False. If set to True, + the command is prefixed by the command specified + in the root_helper kwarg. + :type run_as_root: boolean + :param root_helper: command to prefix to commands called with + run_as_root=True + :type root_helper: string + :param shell: whether or not there should be a shell used to + execute this command. Defaults to false. + :type shell: boolean + :param loglevel: log level for execute commands. + :type loglevel: int. 
(Should be stdlib_logging.DEBUG or + stdlib_logging.INFO) + :returns: (stdout, stderr) from process execution + :raises: :class:`UnknownArgumentError` on + receiving unknown arguments + :raises: :class:`ProcessExecutionError` + """ + + process_input = kwargs.pop('process_input', None) + check_exit_code = kwargs.pop('check_exit_code', [0]) + ignore_exit_code = False + delay_on_retry = kwargs.pop('delay_on_retry', True) + attempts = kwargs.pop('attempts', 1) + run_as_root = kwargs.pop('run_as_root', False) + root_helper = kwargs.pop('root_helper', '') + shell = kwargs.pop('shell', False) + loglevel = kwargs.pop('loglevel', stdlib_logging.DEBUG) + + if isinstance(check_exit_code, bool): + ignore_exit_code = not check_exit_code + check_exit_code = [0] + elif isinstance(check_exit_code, int): + check_exit_code = [check_exit_code] + + if kwargs: + raise UnknownArgumentError(_('Got unknown keyword args ' + 'to utils.execute: %r') % kwargs) + + if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0: + if not root_helper: + raise NoRootWrapSpecified( + message=_('Command requested root, but did not ' + 'specify a root helper.')) + cmd = shlex.split(root_helper) + list(cmd) + + cmd = map(str, cmd) + + while attempts > 0: + attempts -= 1 + try: + LOG.log(loglevel, _('Running cmd (subprocess): %s'), ' '.join(cmd)) + _PIPE = subprocess.PIPE # pylint: disable=E1101 + + if os.name == 'nt': + preexec_fn = None + close_fds = False + else: + preexec_fn = _subprocess_setup + close_fds = True + + obj = subprocess.Popen(cmd, + stdin=_PIPE, + stdout=_PIPE, + stderr=_PIPE, + close_fds=close_fds, + preexec_fn=preexec_fn, + shell=shell) + result = None + for _i in six.moves.range(20): + # NOTE(russellb) 20 is an arbitrary number of retries to + # prevent any chance of looping forever here. + try: + if process_input is not None: + result = obj.communicate(process_input) + else: + result = obj.communicate() + except OSError as e: + if e.errno in (errno.EAGAIN, errno.EINTR): + continue + raise + break + obj.stdin.close() # pylint: disable=E1101 + _returncode = obj.returncode # pylint: disable=E1101 + LOG.log(loglevel, _('Result was %s') % _returncode) + if not ignore_exit_code and _returncode not in check_exit_code: + (stdout, stderr) = result + raise ProcessExecutionError(exit_code=_returncode, + stdout=stdout, + stderr=stderr, + cmd=' '.join(cmd)) + return result + except ProcessExecutionError: + if not attempts: + raise + else: + LOG.log(loglevel, _('%r failed. Retrying.'), cmd) + if delay_on_retry: + greenthread.sleep(random.randint(20, 200) / 100.0) + finally: + # NOTE(termie): this appears to be necessary to let the subprocess + # call clean something up in between calls, without + # it two execute calls in a row hangs the second one + greenthread.sleep(0) + + +def trycmd(*args, **kwargs): + """A wrapper around execute() to more easily handle warnings and errors. + + Returns an (out, err) tuple of strings containing the output of + the command's stdout and stderr. If 'err' is not empty then the + command can be considered to have failed. + + :discard_warnings True | False. Defaults to False. 
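A hedged usage sketch for execute(), using only keyword arguments documented above; the command and the set of allowed exit codes are made up.

.. code-block:: python

    from rack.openstack.common import processutils

    try:
        out, err = processutils.execute('ls', '-l', '/tmp',
                                        check_exit_code=[0, 2],
                                        attempts=3,
                                        delay_on_retry=True)
    except processutils.ProcessExecutionError as e:
        # exit_code, stdout, stderr and cmd are attributes of the exception
        print('command "%s" failed with exit code %s' % (e.cmd, e.exit_code))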
If set to True, + then for succeeding commands, stderr is cleared + + """ + discard_warnings = kwargs.pop('discard_warnings', False) + + try: + out, err = execute(*args, **kwargs) + failed = False + except ProcessExecutionError as exn: + out, err = '', str(exn) + failed = True + + if not failed and discard_warnings and err: + # Handle commands that output to stderr but otherwise succeed + err = '' + + return out, err + + +def ssh_execute(ssh, cmd, process_input=None, + addl_env=None, check_exit_code=True): + LOG.debug(_('Running cmd (SSH): %s'), cmd) + if addl_env: + raise InvalidArgumentError(_('Environment not supported over SSH')) + + if process_input: + # This is (probably) fixable if we need it... + raise InvalidArgumentError(_('process_input not supported over SSH')) + + stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd) + channel = stdout_stream.channel + + # NOTE(justinsb): This seems suspicious... + # ...other SSH clients have buffering issues with this approach + stdout = stdout_stream.read() + stderr = stderr_stream.read() + stdin_stream.close() + + exit_status = channel.recv_exit_status() + + # exit_status == -1 if no exit code was returned + if exit_status != -1: + LOG.debug(_('Result was %s') % exit_status) + if check_exit_code and exit_status != 0: + raise ProcessExecutionError(exit_code=exit_status, + stdout=stdout, + stderr=stderr, + cmd=cmd) + + return (stdout, stderr) diff --git a/rack/openstack/common/report/__init__.py b/rack/openstack/common/report/__init__.py new file mode 100644 index 0000000..35390ec --- /dev/null +++ b/rack/openstack/common/report/__init__.py @@ -0,0 +1,25 @@ +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Provides a way to generate serializable reports + +This package/module provides mechanisms for defining reports +which may then be serialized into various data types. Each +report ( :class:`openstack.common.report.report.BasicReport` ) +is composed of one or more report sections +( :class:`openstack.common.report.report.BasicSection` ), +which contain generators which generate data models +( :class:`openstack.common.report.models.base.ReportModels` ), +which are then serialized by views. +""" diff --git a/rack/openstack/common/report/generators/__init__.py b/rack/openstack/common/report/generators/__init__.py new file mode 100644 index 0000000..68473f2 --- /dev/null +++ b/rack/openstack/common/report/generators/__init__.py @@ -0,0 +1,21 @@ +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
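And a corresponding sketch for trycmd(), which folds failures into the returned err string instead of raising; the command is made up.

.. code-block:: python

    from rack.openstack.common import processutils

    out, err = processutils.trycmd('df', '-h', '/var/nonexistent',
                                   discard_warnings=True)
    if err:
        # With discard_warnings=True, stderr from successful runs is cleared,
        # so a non-empty err means the command actually failed.
        print('command failed: %s' % err)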
See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Provides Data Model Generators + +This module defines classes for generating data models +( :class:`openstack.common.report.models.base.ReportModel` ). +A generator is any object which is callable with no parameters +and returns a data model. +""" diff --git a/rack/openstack/common/report/generators/conf.py b/rack/openstack/common/report/generators/conf.py new file mode 100644 index 0000000..11b1f0d --- /dev/null +++ b/rack/openstack/common/report/generators/conf.py @@ -0,0 +1,44 @@ +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Provides Openstack config generators + +This module defines a class for configuration +generators for generating the model in +:mod:`openstack.common.report.models.conf`. +""" + +from oslo.config import cfg + +import rack.openstack.common.report.models.conf as cm + + +class ConfigReportGenerator(object): + """A Configuration Data Generator + + This generator returns + :class:`openstack.common.report.models.conf.ConfigModel` , + by default using the configuration options stored + in :attr:`oslo.config.cfg.CONF`, which is where + Openstack stores everything. + + :param cnf: the configuration option object + :type cnf: :class:`oslo.config.cfg.ConfigOpts` + """ + + def __init__(self, cnf=cfg.CONF): + self.conf_obj = cnf + + def __call__(self): + return cm.ConfigModel(self.conf_obj) diff --git a/rack/openstack/common/report/generators/threading.py b/rack/openstack/common/report/generators/threading.py new file mode 100644 index 0000000..e0eecf7 --- /dev/null +++ b/rack/openstack/common/report/generators/threading.py @@ -0,0 +1,73 @@ +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Provides thread-related generators + +This module defines classes for threading-related +generators for generating the models in +:mod:`openstack.common.report.models.threading`. 
+""" + +import sys + +import greenlet + +import rack.openstack.common.report.models.threading as tm +from rack.openstack.common.report.models import with_default_views as mwdv +import rack.openstack.common.report.utils as rutils +import rack.openstack.common.report.views.text.generic as text_views + + +class ThreadReportGenerator(object): + """A Thread Data Generator + + This generator returns a collection of + :class:`openstack.common.report.models.threading.ThreadModel` + objects by introspecting the current python state using + :func:`sys._current_frames()` . + """ + + def __call__(self): + threadModels = [ + tm.ThreadModel(thread_id, stack) + for thread_id, stack in sys._current_frames().items() + ] + + thread_pairs = dict(zip(range(len(threadModels)), threadModels)) + return mwdv.ModelWithDefaultViews(thread_pairs, + text_view=text_views.MultiView()) + + +class GreenThreadReportGenerator(object): + """A Green Thread Data Generator + + This generator returns a collection of + :class:`openstack.common.report.models.threading.GreenThreadModel` + objects by introspecting the current python garbage collection + state, and sifting through for :class:`greenlet.greenlet` objects. + + .. seealso:: + + Function :func:`openstack.common.report.utils._find_objects` + """ + + def __call__(self): + threadModels = [ + tm.GreenThreadModel(gr.gr_frame) + for gr in rutils._find_objects(greenlet.greenlet) + ] + + thread_pairs = dict(zip(range(len(threadModels)), threadModels)) + return mwdv.ModelWithDefaultViews(thread_pairs, + text_view=text_views.MultiView()) diff --git a/rack/openstack/common/report/generators/version.py b/rack/openstack/common/report/generators/version.py new file mode 100644 index 0000000..5aeab34 --- /dev/null +++ b/rack/openstack/common/report/generators/version.py @@ -0,0 +1,46 @@ +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Provides Openstack version generators + +This module defines a class for Openstack +version and package information +generators for generating the model in +:mod:`openstack.common.report.models.version`. +""" + +import rack.openstack.common.report.models.version as vm + + +class PackageReportGenerator(object): + """A Package Information Data Generator + + This generator returns + :class:`openstack.common.report.models.version.PackageModel`, + extracting data from the given version object, which should follow + the general format defined in Nova's version information (i.e. it + should contain the methods vendor_string, product_string, and + version_string_with_package). 
+ + :param version_object: the version information object + """ + + def __init__(self, version_obj): + self.version_obj = version_obj + + def __call__(self): + return vm.PackageModel( + self.version_obj.vendor_string(), + self.version_obj.product_string(), + self.version_obj.version_string_with_package()) diff --git a/rack/openstack/common/report/guru_meditation_report.py b/rack/openstack/common/report/guru_meditation_report.py new file mode 100644 index 0000000..a01c520 --- /dev/null +++ b/rack/openstack/common/report/guru_meditation_report.py @@ -0,0 +1,186 @@ +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Provides Guru Meditation Report + +This module defines the actual OpenStack Guru Meditation +Report class. + +This can be used in the OpenStack command definition files. +For example, in a rack command module (under rack/cmd): + +.. code-block:: python + :emphasize-lines: 8,9,10 + + CONF = cfg.CONF + # maybe import some options here... + + def main(): + config.parse_args(sys.argv) + logging.setup('blah') + + TextGuruMeditation.register_section('Some Special Section', + special_section_generator) + TextGuruMeditation.setup_autorun(version_object) + + server = service.Service.create(binary='some-service', + topic=CONF.some_service_topic) + service.serve(server) + service.wait() + +Then, you can do + +.. code-block:: bash + + $ kill -USR1 $SERVICE_PID + +and get a Guru Meditation Report in the file or terminal +where stderr is logged for that given service. +""" + +from __future__ import print_function + +import signal +import sys + +from rack.openstack.common.report.generators import conf as cgen +from rack.openstack.common.report.generators import threading as tgen +from rack.openstack.common.report.generators import version as pgen +from rack.openstack.common.report import report + + +class GuruMeditation(object): + """A Guru Meditation Report Mixin/Base Class + + This class is a base class for Guru Meditation Reports. + It provides facilities for registering sections and + setting up functionality to auto-run the report on + a certain signal. + + This class should always be used in conjunction with + a Report class via multiple inheritance. It should + always come first in the class list to ensure the + MRO is correct. + """ + + def __init__(self, version_obj, *args, **kwargs): + self.version_obj = version_obj + + super(GuruMeditation, self).__init__(*args, **kwargs) + self.start_section_index = len(self.sections) + + @classmethod + def register_section(cls, section_title, generator): + """Register a New Section + + This method registers a persistent section for the current + class. 
+ + :param str section_title: the title of the section + :param generator: the generator for the section + """ + + try: + cls.persistent_sections.append([section_title, generator]) + except AttributeError: + cls.persistent_sections = [[section_title, generator]] + + @classmethod + def setup_autorun(cls, version, signum=None): + """Set Up Auto-Run + + This method sets up the Guru Meditation Report to automatically + get dumped to stderr when the given signal is received. + + :param version: the version object for the current product + :param signum: the signal to associate with running the report + """ + + if not signum and hasattr(signal, 'SIGUSR1'): + # SIGUSR1 is not supported on all platforms + signum = signal.SIGUSR1 + + if signum: + signal.signal(signum, + lambda *args: cls.handle_signal(version, *args)) + + @classmethod + def handle_signal(cls, version, *args): + """The Signal Handler + + This method (indirectly) handles receiving a registered signal and + dumping the Guru Meditation Report to stderr. This method is designed + to be curried into a proper signal handler by currying out the version + parameter. + + :param version: the version object for the current product + """ + + try: + res = cls(version).run() + except Exception: + print("Unable to run Guru Meditation Report!", + file=sys.stderr) + else: + print(res, file=sys.stderr) + + def _readd_sections(self): + del self.sections[self.start_section_index:] + + self.add_section('Package', + pgen.PackageReportGenerator(self.version_obj)) + + self.add_section('Threads', + tgen.ThreadReportGenerator()) + + self.add_section('Green Threads', + tgen.GreenThreadReportGenerator()) + + self.add_section('Configuration', + cgen.ConfigReportGenerator()) + + try: + for section_title, generator in self.persistent_sections: + self.add_section(section_title, generator) + except AttributeError: + pass + + def run(self): + self._readd_sections() + return super(GuruMeditation, self).run() + + +# GuruMeditation must come first to get the correct MRO +class TextGuruMeditation(GuruMeditation, report.TextReport): + """A Text Guru Meditation Report + + This report is the basic human-readable Guru Meditation Report + + It contains the following sections by default + (in addition to any registered persistent sections): + + - Package Information + + - Threads List + + - Green Threads List + + - Configuration Options + + :param version_obj: the version object for the current product + """ + + def __init__(self, version_obj): + super(TextGuruMeditation, self).__init__(version_obj, + 'Guru Meditation') diff --git a/rack/openstack/common/report/models/__init__.py b/rack/openstack/common/report/models/__init__.py new file mode 100644 index 0000000..7bfed3d --- /dev/null +++ b/rack/openstack/common/report/models/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
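Complementing the main() example above, a hedged sketch of registering a custom persistent section and running the report directly; rack_state_generator and FakeVersion are hypothetical stand-ins.

.. code-block:: python

    from rack.openstack.common.report.guru_meditation_report import \
        TextGuruMeditation
    from rack.openstack.common.report.models import with_default_views as mwdv
    from rack.openstack.common.report.views.text import generic as text_generic


    def rack_state_generator():
        # any callable taking no arguments and returning a model will do
        return mwdv.ModelWithDefaultViews({'groups': 3, 'processes': 12},
                                          text_view=text_generic.KeyValueView())


    class FakeVersion(object):
        # stand-in exposing the three methods PackageReportGenerator expects
        def vendor_string(self):
            return 'example vendor'

        def product_string(self):
            return 'rack'

        def version_string_with_package(self):
            return '0.1'


    TextGuruMeditation.register_section('RACK State', rack_state_generator)
    print(TextGuruMeditation(FakeVersion()).run())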
+ +"""Provides data models + +This module provides both the base data model, +as well as several predefined specific data models +to be used in reports. +""" diff --git a/rack/openstack/common/report/models/base.py b/rack/openstack/common/report/models/base.py new file mode 100644 index 0000000..90914ff --- /dev/null +++ b/rack/openstack/common/report/models/base.py @@ -0,0 +1,114 @@ +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Provides the base report model + +This module defines a class representing the basic report +data model from which all data models should inherit (or +at least implement similar functionality). Data models +store unserialized data generated by generators during +the report serialization process. +""" + +import collections as col +import copy + + +class ReportModel(col.MutableMapping): + """A Report Data Model + + A report data model contains data generated by some + generator method or class. Data may be read or written + using dictionary-style access, and may be read (but not + written) using object-member-style access. Additionally, + a data model may have an associated view. This view is + used to serialize the model when str() is called on the + model. An appropriate object for a view is callable with + a single parameter: the model to be serialized. 
+ + :param data: a dictionary of data to initially associate with the model + :param attached_view: a view object to attach to this model + """ + + def __init__(self, data=None, attached_view=None): + self.attached_view = attached_view + self.data = data or {} + + def __str__(self): + self_cpy = copy.deepcopy(self) + for key in self_cpy: + if getattr(self_cpy[key], 'attached_view', None) is not None: + self_cpy[key] = str(self_cpy[key]) + + if self.attached_view is not None: + return self.attached_view(self_cpy) + else: + raise Exception("Cannot stringify model: no attached view") + + def __repr__(self): + if self.attached_view is not None: + return ("").format(cl=type(self), + dt=self.data, + vw=type(self.attached_view)) + else: + return ("").format(cl=type(self), + dt=self.data) + + def __getitem__(self, attrname): + return self.data[attrname] + + def __setitem__(self, attrname, attrval): + self.data[attrname] = attrval + + def __delitem__(self, attrname): + del self.data[attrname] + + def __contains__(self, key): + return self.data.__contains__(key) + + def __getattr__(self, attrname): + try: + return self.data[attrname] + except KeyError: + raise AttributeError( + "'{cl}' object has no attribute '{an}'".format( + cl=type(self).__name__, an=attrname + ) + ) + + def __len__(self): + return len(self.data) + + def __iter__(self): + return self.data.__iter__() + + def set_current_view_type(self, tp): + """Set the current view type + + This method attempts to set the current view + type for this model and all submodels by calling + itself recursively on all values (and ignoring the + ones that are not themselves models) + + :param tp: the type of the view ('text', 'json', 'xml', etc) + """ + + for key in self: + try: + self[key].set_current_view_type(tp) + except AttributeError: + pass diff --git a/rack/openstack/common/report/models/conf.py b/rack/openstack/common/report/models/conf.py new file mode 100644 index 0000000..061eb65 --- /dev/null +++ b/rack/openstack/common/report/models/conf.py @@ -0,0 +1,58 @@ +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Provides Openstack Configuration Model + +This module defines a class representing the data +model for :mod:`oslo.config` configuration options +""" + +import rack.openstack.common.report.models.with_default_views as mwdv +import rack.openstack.common.report.views.text.generic as generic_text_views + + +class ConfigModel(mwdv.ModelWithDefaultViews): + """A Configuration Options Model + + This model holds data about a set of configuration options + from :mod:`oslo.config`. It supports both the default group + of options and named option groups. 
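A hedged sketch of the access patterns described above; upper_view is just a plain function standing in for a view object.

.. code-block:: python

    from rack.openstack.common.report.models import base as base_model


    def upper_view(model):
        return ', '.join('%s=%s' % (k, model[k]) for k in model).upper()

    m = base_model.ReportModel(data={'name': 'proc-1', 'pid': 42},
                               attached_view=upper_view)
    print(m['name'])   # dictionary-style read
    print(m.pid)       # attribute-style read of the same value
    print(str(m))      # serialized through the attached view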
+ + :param conf_obj: a configuration object + :type conf_obj: :class:`oslo.config.cfg.ConfigOpts` + """ + + def __init__(self, conf_obj): + kv_view = generic_text_views.KeyValueView(dict_sep=": ", + before_dict='') + super(ConfigModel, self).__init__(text_view=kv_view) + + def opt_title(optname, co): + return co._opts[optname]['opt'].name + + self['default'] = dict( + (opt_title(optname, conf_obj), conf_obj[optname]) + for optname in conf_obj._opts + ) + + groups = {} + for groupname in conf_obj._groups: + group_obj = conf_obj._groups[groupname] + curr_group_opts = dict( + (opt_title(optname, group_obj), conf_obj[groupname][optname]) + for optname in group_obj._opts + ) + groups[group_obj.name] = curr_group_opts + + self.update(groups) diff --git a/rack/openstack/common/report/models/threading.py b/rack/openstack/common/report/models/threading.py new file mode 100644 index 0000000..6715108 --- /dev/null +++ b/rack/openstack/common/report/models/threading.py @@ -0,0 +1,100 @@ +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Provides threading and stack-trace models + +This module defines classes representing thread, green +thread, and stack trace data models +""" + +import traceback + +import rack.openstack.common.report.models.with_default_views as mwdv +import rack.openstack.common.report.views.text.threading as text_views + + +class StackTraceModel(mwdv.ModelWithDefaultViews): + """A Stack Trace Model + + This model holds data from a python stack trace, + commonly extracted from running thread information + + :param stack_state: the python stack_state object + """ + + def __init__(self, stack_state): + super(StackTraceModel, self).__init__( + text_view=text_views.StackTraceView()) + + if (stack_state is not None): + self['lines'] = [ + {'filename': fn, 'line': ln, 'name': nm, 'code': cd} + for fn, ln, nm, cd in traceback.extract_stack(stack_state) + ] + + if stack_state.f_exc_type is not None: + self['root_exception'] = { + 'type': stack_state.f_exc_type, + 'value': stack_state.f_exc_value + } + else: + self['root_exception'] = None + else: + self['lines'] = [] + self['root_exception'] = None + + +class ThreadModel(mwdv.ModelWithDefaultViews): + """A Thread Model + + This model holds data for information about an + individual thread. It holds both a thread id, + as well as a stack trace for the thread + + .. seealso:: + + Class :class:`StackTraceModel` + + :param int thread_id: the id of the thread + :param stack: the python stack state for the current thread + """ + + # threadId, stack in sys._current_frams().items() + def __init__(self, thread_id, stack): + super(ThreadModel, self).__init__(text_view=text_views.ThreadView()) + + self['thread_id'] = thread_id + self['stack_trace'] = StackTraceModel(stack) + + +class GreenThreadModel(mwdv.ModelWithDefaultViews): + """A Green Thread Model + + This model holds data for information about an + individual thread. 
Unlike the thread model, + it holds just a stack trace, since green threads + do not have thread ids. + + .. seealso:: + + Class :class:`StackTraceModel` + + :param stack: the python stack state for the green thread + """ + + # gr in greenpool.coroutines_running --> gr.gr_frame + def __init__(self, stack): + super(GreenThreadModel, self).__init__( + {'stack_trace': StackTraceModel(stack)}, + text_view=text_views.GreenThreadView()) diff --git a/rack/openstack/common/report/models/version.py b/rack/openstack/common/report/models/version.py new file mode 100644 index 0000000..e353759 --- /dev/null +++ b/rack/openstack/common/report/models/version.py @@ -0,0 +1,44 @@ +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Provides Openstack Version Info Model + +This module defines a class representing the data +model for Openstack package and version information +""" + +import rack.openstack.common.report.models.with_default_views as mwdv +import rack.openstack.common.report.views.text.generic as generic_text_views + + +class PackageModel(mwdv.ModelWithDefaultViews): + """A Package Information Model + + This model holds information about the current + package. It contains vendor, product, and version + information. + + :param str vendor: the product vendor + :param str product: the product name + :param str version: the product version + """ + + def __init__(self, vendor, product, version): + super(PackageModel, self).__init__( + text_view=generic_text_views.KeyValueView() + ) + + self['vendor'] = vendor + self['product'] = product + self['version'] = version diff --git a/rack/openstack/common/report/models/with_default_views.py b/rack/openstack/common/report/models/with_default_views.py new file mode 100644 index 0000000..0a3bd02 --- /dev/null +++ b/rack/openstack/common/report/models/with_default_views.py @@ -0,0 +1,81 @@ +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy + +import rack.openstack.common.report.models.base as base_model +import rack.openstack.common.report.views.json.generic as jsonviews +import rack.openstack.common.report.views.text.generic as textviews +import rack.openstack.common.report.views.xml.generic as xmlviews + + +class ModelWithDefaultViews(base_model.ReportModel): + """A Model With Default Views of Various Types + + A model with default views has several predefined views, + each associated with a given type. 
This is often used for + when a submodel should have an attached view, but the view + differs depending on the serialization format + + Paramaters are as the superclass, with the exception + of any parameters ending in '_view': these parameters + get stored as default views. + + The default 'default views' are + + text + :class:`openstack.common.views.text.generic.KeyValueView` + xml + :class:`openstack.common.views.xml.generic.KeyValueView` + json + :class:`openstack.common.views.json.generic.KeyValueView` + + .. function:: to_type() + + ('type' is one of the 'default views' defined for this model) + Serializes this model using the default view for 'type' + + :rtype: str + :returns: this model serialized as 'type' + """ + + def __init__(self, *args, **kwargs): + self.views = { + 'text': textviews.KeyValueView(), + 'json': jsonviews.KeyValueView(), + 'xml': xmlviews.KeyValueView() + } + + newargs = copy.copy(kwargs) + for k in kwargs: + if k.endswith('_view'): + self.views[k[:-5]] = kwargs[k] + del newargs[k] + super(ModelWithDefaultViews, self).__init__(*args, **newargs) + + def set_current_view_type(self, tp): + self.attached_view = self.views[tp] + super(ModelWithDefaultViews, self).set_current_view_type(tp) + + def __getattr__(self, attrname): + if attrname[:3] == 'to_': + if self.views[attrname[3:]] is not None: + return lambda: self.views[attrname[3:]](self) + else: + raise NotImplementedError(( + "Model {cn.__module__}.{cn.__name__} does not have" + + " a default view for " + "{tp}").format(cn=type(self), tp=attrname[3:])) + else: + return super(ModelWithDefaultViews, self).__getattr__(attrname) diff --git a/rack/openstack/common/report/report.py b/rack/openstack/common/report/report.py new file mode 100644 index 0000000..ca8479a --- /dev/null +++ b/rack/openstack/common/report/report.py @@ -0,0 +1,189 @@ +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Provides Report classes + +This module defines various classes representing +reports and report sections. All reports take the +form of a report class containing various report sections. +""" + +import rack.openstack.common.report.views.text.header as header_views + + +class BasicReport(object): + """A Basic Report + + A Basic Report consists of a collection of :class:`ReportSection` + objects, each of which contains a top-level model and generator. + It collects these sections into a cohesive report which may then + be serialized by calling :func:`run` + """ + + def __init__(self): + self.sections = [] + self._state = 0 + + def add_section(self, view, generator, index=None): + """Add a section to the report + + This method adds a section with the given view and + generator to the report. An index may be specified to + insert the section at a given location in the list; + If no index is specified, the section is appended to the + list. The view is called on the model which results from + the generator when the report is run. 
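A hedged sketch of the to_<type>() helpers and of switching the current view type on a model with default views.

.. code-block:: python

    from rack.openstack.common.report.models import with_default_views as mwdv

    m = mwdv.ModelWithDefaultViews({'threads': 4, 'status': 'active'})
    print(m.to_json())          # default JSON KeyValueView
    print(m.to_text())          # default text KeyValueView

    m.set_current_view_type('xml')
    print(str(m))               # str() now goes through the XML view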
A generator is simply + a method or callable object which takes no arguments and + returns a :class:`openstack.common.report.models.base.ReportModel` + or similar object. + + :param view: the top-level view for the section + :param generator: the method or class which generates the model + :param index: the index at which to insert the section + (or None to append it) + :type index: int or None + """ + + if index is None: + self.sections.append(ReportSection(view, generator)) + else: + self.sections.insert(index, ReportSection(view, generator)) + + def run(self): + """Run the report + + This method runs the report, having each section generate + its data and serialize itself before joining the sections + together. The BasicReport accomplishes the joining + by joining the serialized sections together with newlines. + + :rtype: str + :returns: the serialized report + """ + + return "\n".join(str(sect) for sect in self.sections) + + +class ReportSection(object): + """A Report Section + + A report section contains a generator and a top-level view. + When something attempts to serialize the section by calling + str() on it, the section runs the generator and calls the view + on the resulting model. + + .. seealso:: + + Class :class:`BasicReport` + :func:`BasicReport.add_section` + + :param view: the top-level view for this section + :param generator: the generator for this section + (any callable object which takes + no parameters and returns a data model) + """ + + def __init__(self, view, generator): + self.view = view + self.generator = generator + + def __str__(self): + return self.view(self.generator()) + + +class ReportOfType(BasicReport): + """A Report of a Certain Type + + A ReportOfType has a predefined type associated with it. + This type is automatically propagated down to the each of + the sections upon serialization by wrapping the generator + for each section. + + .. seealso:: + + Class :class:`openstack.common.report.models.with_default_view.ModelWithDefaultView` # noqa + (the entire class) + + Class :class:`openstack.common.report.models.base.ReportModel` + :func:`openstack.common.report.models.base.ReportModel.set_current_view_type` # noqa + + :param str tp: the type of the report + """ + + def __init__(self, tp): + self.output_type = tp + super(ReportOfType, self).__init__() + + def add_section(self, view, generator, index=None): + def with_type(gen): + def newgen(): + res = gen() + try: + res.set_current_view_type(self.output_type) + except AttributeError: + pass + + return res + return newgen + + super(ReportOfType, self).add_section( + view, + with_type(generator), + index + ) + + +class TextReport(ReportOfType): + """A Human-Readable Text Report + + This class defines a report that is designed to be read by a human + being. It has nice section headers, and a formatted title. + + :param str name: the title of the report + """ + + def __init__(self, name): + super(TextReport, self).__init__('text') + self.name = name + # add a title with a generator that creates an empty result model + self.add_section(name, lambda: ('|' * 72) + "\n\n") + + def add_section(self, heading, generator, index=None): + """Add a section to the report + + This method adds a section with the given title, and + generator to the report. An index may be specified to + insert the section at a given location in the list; + If no index is specified, the section is appended to the + list. The view is called on the model which results from + the generator when the report is run. 
A generator is simply + a method or callable object which takes no arguments and + returns a :class:`openstack.common.report.models.base.ReportModel` + or similar object. + + The model is told to serialize as text (if possible) at serialization + time by wrapping the generator. The view model's attached view + (if any) is wrapped in a + :class:`openstack.common.report.views.text.header.TitledView` + + :param str heading: the title for the section + :param generator: the method or class which generates the model + :param index: the index at which to insert the section + (or None to append) + :type index: int or None + """ + + super(TextReport, self).add_section(header_views.TitledView(heading), + generator, + index) diff --git a/rack/openstack/common/report/utils.py b/rack/openstack/common/report/utils.py new file mode 100644 index 0000000..fb71e36 --- /dev/null +++ b/rack/openstack/common/report/utils.py @@ -0,0 +1,46 @@ +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Various utilities for report generation + +This module includes various utilities +used in generating reports. +""" + +import gc + + +class StringWithAttrs(str): + """A String that can have arbitrary attributes + """ + + pass + + +def _find_objects(t): + """Find Objects in the GC State + + This horribly hackish method locates objects of a + given class in the current python instance's garbage + collection state. In case you couldn't tell, this is + horribly hackish, but is necessary for locating all + green threads, since they don't keep track of themselves + like normal threads do in python. + + :param class t: the class of object to locate + :rtype: list + :returns: a list of objects of the given type + """ + + return [o for o in gc.get_objects() if isinstance(o, t)] diff --git a/rack/openstack/common/report/views/__init__.py b/rack/openstack/common/report/views/__init__.py new file mode 100644 index 0000000..612959b --- /dev/null +++ b/rack/openstack/common/report/views/__init__.py @@ -0,0 +1,22 @@ +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Provides predefined views + +This module provides a collection of predefined views +for use in reports. It is separated by type (xml, json, or text). +Each type contains a submodule called 'generic' containing +several basic, universal views for that type. There is also +a predefined view that utilizes Jinja. 
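A hedged sketch of assembling a small text report by hand; the section name and the numbers are made up.

.. code-block:: python

    from rack.openstack.common.report import report
    from rack.openstack.common.report.models import with_default_views as mwdv
    from rack.openstack.common.report.views.text import generic as text_generic


    def summary_generator():
        return mwdv.ModelWithDefaultViews({'workers': 2, 'queued': 0},
                                          text_view=text_generic.KeyValueView())

    rep = report.TextReport('Worker Summary')
    rep.add_section('Counts', summary_generator)
    print(rep.run())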
+""" diff --git a/rack/openstack/common/report/views/jinja_view.py b/rack/openstack/common/report/views/jinja_view.py new file mode 100644 index 0000000..a6f340e --- /dev/null +++ b/rack/openstack/common/report/views/jinja_view.py @@ -0,0 +1,125 @@ +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Provides Jinja Views + +This module provides views that utilize the Jinja templating +system for serialization. For more information on Jinja, please +see http://jinja.pocoo.org/ . +""" + +import jinja2 + + +class JinjaView(object): + """A Jinja View + + This view renders the given model using the provided Jinja + template. The template can be given in various ways. + If the `VIEw_TEXT` property is defined, that is used as template. + Othewise, if a `path` parameter is passed to the constructor, that + is used to load a file containing the template. If the `path` + parameter is None, the `text` parameter is used as the template. + + The leading newline character and trailing newline character are stripped + from the template (provided they exist). Baseline indentation is + also stripped from each line. The baseline indentation is determined by + checking the indentation of the first line, after stripping off the leading + newline (if any). + + :param str path: the path to the Jinja template + :param str text: the text of the Jinja template + """ + + def __init__(self, path=None, text=None): + try: + self._text = self.VIEW_TEXT + except AttributeError: + if path is not None: + with open(path, 'r') as f: + self._text = f.read() + elif text is not None: + self._text = text + else: + self._text = "" + + if self._text[0] == "\n": + self._text = self._text[1:] + + newtext = self._text.lstrip() + amt = len(self._text) - len(newtext) + if (amt > 0): + base_indent = self._text[0:amt] + lines = self._text.splitlines() + newlines = [] + for line in lines: + if line.startswith(base_indent): + newlines.append(line[amt:]) + else: + newlines.append(line) + self._text = "\n".join(newlines) + + if self._text[-1] == "\n": + self._text = self._text[:-1] + + self._regentemplate = True + self._templatecache = None + + def __call__(self, model): + return self.template.render(**model) + + @property + def template(self): + """Get the Compiled Template + + Gets the compiled template, using a cached copy if possible + (stored in attr:`_templatecache`) or otherwise recompiling + the template if the compiled template is not present or is + invalid (due to attr:`_regentemplate` being set to True). 
+ + :returns: the compiled Jinja template + :rtype: :class:`jinja2.Template` + """ + + if self._templatecache is None or self._regentemplate: + self._templatecache = jinja2.Template(self._text) + self._regentemplate = False + + return self._templatecache + + def _gettext(self): + """Get the Template Text + + Gets the text of the current template + + :returns: the text of the Jinja template + :rtype: str + """ + + return self._text + + def _settext(self, textval): + """Set the Template Text + + Sets the text of the current template, marking it + for recompilation next time the compiled template + is retrived via attr:`template` . + + :param str textval: the new text of the Jinja template + """ + + self._text = textval + self.regentemplate = True + + text = property(_gettext, _settext) diff --git a/rack/openstack/common/report/views/json/__init__.py b/rack/openstack/common/report/views/json/__init__.py new file mode 100644 index 0000000..47bd33b --- /dev/null +++ b/rack/openstack/common/report/views/json/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Provides basic JSON views + +This module provides several basic views which serialize +models into JSON. +""" diff --git a/rack/openstack/common/report/views/json/generic.py b/rack/openstack/common/report/views/json/generic.py new file mode 100644 index 0000000..319abd6 --- /dev/null +++ b/rack/openstack/common/report/views/json/generic.py @@ -0,0 +1,65 @@ +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Provides generic JSON views + +This modules defines several basic views for serializing +data to JSON. 
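A hedged sketch of JinjaView with an inline template; it also shows the baseline-indentation stripping described in the class docstring. The template text and model are made up.

.. code-block:: python

    from rack.openstack.common.report.views import jinja_view

    view = jinja_view.JinjaView(text="""
        Process {{ name }} is {{ state }}
        """)

    print(view({'name': 'worker-1', 'state': 'ACTIVE'}))
    # -> "Process worker-1 is ACTIVE" (the leading newline and the common
    #    indentation have been stripped from the template)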
Submodels that have already been serialized +as JSON may have their string values marked with `__is_json__ += True` using :class:`openstack.common.report.utils.StringWithAttrs` +(each of the classes within this module does this automatically, +and non-naive serializers check for this attribute and handle +such strings specially) +""" + +import copy + +from rack.openstack.common import jsonutils as json +import rack.openstack.common.report.utils as utils + + +class BasicKeyValueView(object): + """A Basic Key-Value JSON View + + This view performs a naive serialization of a model + into JSON by simply calling :func:`json.dumps` on the model + """ + + def __call__(self, model): + res = utils.StringWithAttrs(json.dumps(model.data)) + res.__is_json__ = True + return res + + +class KeyValueView(object): + """A Key-Value JSON View + + This view performs advanced serialization to a model + into JSON. It does so by first checking all values to + see if they are marked as JSON. If so, they are deserialized + using :func:`json.loads`. Then, the copy of the model with all + JSON deserialized is reserialized into proper nested JSON using + :func:`json.dumps`. + """ + + def __call__(self, model): + # this part deals with subviews that were already serialized + cpy = copy.deepcopy(model) + for key, valstr in model.items(): + if getattr(valstr, '__is_json__', False): + cpy[key] = json.loads(valstr) + + res = utils.StringWithAttrs(json.dumps(cpy.data)) + res.__is_json__ = True + return res diff --git a/rack/openstack/common/report/views/text/__init__.py b/rack/openstack/common/report/views/text/__init__.py new file mode 100644 index 0000000..c097484 --- /dev/null +++ b/rack/openstack/common/report/views/text/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Provides basic text views + +This module provides several basic views which serialize +models into human-readable text. +""" diff --git a/rack/openstack/common/report/views/text/generic.py b/rack/openstack/common/report/views/text/generic.py new file mode 100644 index 0000000..7363833 --- /dev/null +++ b/rack/openstack/common/report/views/text/generic.py @@ -0,0 +1,202 @@ +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Provides generic text views + +This modules provides several generic views for +serializing models into human-readable text. 
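As a rough illustration (not part of the patch), the JSON views above expect report-model objects that behave like mappings and expose a .data attribute; FakeModel below is a made-up stand-in for the real models under rack/openstack/common/report/models/:

    from rack.openstack.common.report.views.json.generic import BasicKeyValueView


    class FakeModel(dict):
        """Hypothetical stand-in: a dict that also exposes itself as .data."""

        @property
        def data(self):
            return dict(self)


    view = BasicKeyValueView()
    serialized = view(FakeModel(host='node-1', workers=4))
    print(serialized)              # {"host": "node-1", "workers": 4} (key order may vary)
    print(serialized.__is_json__)  # True, so enclosing views will not re-serialize it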
+""" + +import collections as col + +import six + + +class MultiView(object): + """A Text View Containing Multiple Views + + This view simply serializes each + value in the data model, and then + joins them with newlines (ignoring + the key values altogether). This is + useful for serializing lists of models + (as array-like dicts). + """ + + def __call__(self, model): + res = [str(model[key]) for key in model] + return "\n".join(res) + + +class BasicKeyValueView(object): + """A Basic Key-Value Text View + + This view performs a naive serialization of a model into + text using a basic key-value method, where each + key-value pair is rendered as "key = str(value)" + """ + + def __call__(self, model): + res = "" + for key in model: + res += "{key} = {value}\n".format(key=key, value=model[key]) + + return res + + +class KeyValueView(object): + """A Key-Value Text View + + This view performs an advanced serialization of a model + into text by following the following set of rules: + + key : text + key = text + + rootkey : Mapping + :: + + rootkey = + serialize(key, value) + + key : Sequence + :: + + key = + serialize(item) + + :param str indent_str: the string used to represent one "indent" + :param str key_sep: the separator to use between keys and values + :param str dict_sep: the separator to use after a dictionary root key + :param str list_sep: the separator to use after a list root key + :param str anon_dict: the "key" to use when there is a dict in a list + (does not automatically use the dict separator) + :param before_dict: content to place on the line(s) before the a dict + root key (use None to avoid inserting an extra line) + :type before_dict: str or None + :param before_list: content to place on the line(s) before the a list + root key (use None to avoid inserting an extra line) + :type before_list: str or None + """ + + def __init__(self, + indent_str=' ', + key_sep=' = ', + dict_sep=' = ', + list_sep=' = ', + anon_dict='[dict]', + before_dict=None, + before_list=None): + self.indent_str = indent_str + self.key_sep = key_sep + self.dict_sep = dict_sep + self.list_sep = list_sep + self.anon_dict = anon_dict + self.before_dict = before_dict + self.before_list = before_list + + def __call__(self, model): + def serialize(root, rootkey, indent): + res = [] + if rootkey is not None: + res.append((self.indent_str * indent) + rootkey) + + if isinstance(root, col.Mapping): + if rootkey is None and indent > 0: + res.append((self.indent_str * indent) + self.anon_dict) + elif rootkey is not None: + res[0] += self.dict_sep + if self.before_dict is not None: + res.insert(0, self.before_dict) + + for key in root: + res.extend(serialize(root[key], key, indent + 1)) + elif (isinstance(root, col.Sequence) and + not isinstance(root, six.string_types)): + if rootkey is not None: + res[0] += self.list_sep + if self.before_list is not None: + res.insert(0, self.before_list) + + for val in root: + res.extend(serialize(val, None, indent + 1)) + else: + str_root = str(root) + if '\n' in str_root: + # we are in a submodel + if rootkey is not None: + res[0] += self.dict_sep + + list_root = [(self.indent_str * (indent + 1)) + line + for line in str_root.split('\n')] + res.extend(list_root) + else: + # just a normal key or list entry + try: + res[0] += self.key_sep + str_root + except IndexError: + res = [(self.indent_str * indent) + str_root] + + return res + + return "\n".join(serialize(model, None, -1)) + + +class TableView(object): + """A Basic Table Text View + + This view performs serialization of data 
into a basic table with + predefined column names and mappings. Column width is auto-calculated + evenly, column values are automatically truncated accordingly. Values + are centered in the columns. + + :param [str] column_names: the headers for each of the columns + :param [str] column_values: the item name to match each column to in + each row + :param str table_prop_name: the name of the property within the model + containing the row models + """ + + def __init__(self, column_names, column_values, table_prop_name): + self.table_prop_name = table_prop_name + self.column_names = column_names + self.column_values = column_values + self.column_width = (72 - len(column_names) + 1) / len(column_names) + + column_headers = "|".join( + "{ch[" + str(n) + "]: ^" + str(self.column_width) + "}" + for n in range(len(column_names)) + ) + + # correct for float-to-int roundoff error + test_fmt = column_headers.format(ch=column_names) + if len(test_fmt) < 72: + column_headers += ' ' * (72 - len(test_fmt)) + + vert_divider = '-' * 72 + self.header_fmt_str = column_headers + "\n" + vert_divider + "\n" + + self.row_fmt_str = "|".join( + "{cv[" + str(n) + "]: ^" + str(self.column_width) + "}" + for n in range(len(column_values)) + ) + + def __call__(self, model): + res = self.header_fmt_str.format(ch=self.column_names) + for raw_row in model[self.table_prop_name]: + row = [str(raw_row[prop_name]) for prop_name in self.column_values] + # double format is in case we have roundoff error + res += '{0: <72}\n'.format(self.row_fmt_str.format(cv=row)) + + return res diff --git a/rack/openstack/common/report/views/text/header.py b/rack/openstack/common/report/views/text/header.py new file mode 100644 index 0000000..58d06c0 --- /dev/null +++ b/rack/openstack/common/report/views/text/header.py @@ -0,0 +1,51 @@ +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Text Views With Headers + +This package defines several text views with headers +""" + + +class HeaderView(object): + """A Text View With a Header + + This view simply serializes the model and places the given + header on top. + + :param header: the header (can be anything on which str() can be called) + """ + + def __init__(self, header): + self.header = header + + def __call__(self, model): + return str(self.header) + "\n" + str(model) + + +class TitledView(HeaderView): + """A Text View With a Title + + This view simply serializes the model, and places + a preformatted header containing the given title + text on top. The title text can be up to 64 characters + long. 
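A worked sketch (not part of the patch) of the layout arithmetic above, assuming the Python 2 integer division this tree relies on: with two columns, column_width = (72 - 2 + 1) / 2 = 35, so the header becomes two 35-character centered fields joined by '|' (71 characters) and padded back out to 72. The column and property names below are made up:

    from rack.openstack.common.report.views.text.generic import TableView

    view = TableView(column_names=['Name', 'State'],
                     column_values=['name', 'state'],
                     table_prop_name='processes')

    model = {'processes': [{'name': 'rack-api', 'state': 'running'},
                           {'name': 'rack-scheduler', 'state': 'sleeping'}]}
    print(view(model))   # 72-column header, divider, then one centered row per entry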
+ + :param str title: the title of the view + """ + + FORMAT_STR = ('=' * 72) + "\n===={0: ^64}====\n" + ('=' * 72) + + def __init__(self, title): + super(TitledView, self).__init__(self.FORMAT_STR.format(title)) diff --git a/rack/openstack/common/report/views/text/threading.py b/rack/openstack/common/report/views/text/threading.py new file mode 100644 index 0000000..d4574bd --- /dev/null +++ b/rack/openstack/common/report/views/text/threading.py @@ -0,0 +1,80 @@ +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Provides thread and stack-trace views + +This module provides a collection of views for +visualizing threads, green threads, and stack traces +in human-readable form. +""" + +import rack.openstack.common.report.views.jinja_view as jv + + +class StackTraceView(jv.JinjaView): + """A Stack Trace View + + This view displays stack trace models defined by + :class:`openstack.common.report.models.threading.StackTraceModel` + """ + + VIEW_TEXT = ( + "{% if root_exception is not none %}" + "Exception: {{ root_exception }}\n" + "------------------------------------\n" + "\n" + "{% endif %}" + "{% for line in lines %}\n" + "{{ line.filename }}:{{ line.line }} in {{ line.name }}\n" + " {% if line.code is not none %}" + "`{{ line.code }}`" + "{% else %}" + "(source not found)" + "{% endif %}\n" + "{% else %}\n" + "No Traceback!\n" + "{% endfor %}" + ) + + +class GreenThreadView(object): + """A Green Thread View + + This view displays a green thread provided by the data + model :class:`openstack.common.report.models.threading.GreenThreadModel` # noqa + """ + + FORMAT_STR = "------{thread_str: ^60}------" + "\n" + "{stack_trace}" + + def __call__(self, model): + return self.FORMAT_STR.format( + thread_str=" Green Thread ", + stack_trace=model.stack_trace + ) + + +class ThreadView(object): + """A Thread Collection View + + This view displays a python thread provided by the data + model :class:`openstack.common.report.models.threading.ThreadModel` # noqa + """ + + FORMAT_STR = "------{thread_str: ^60}------" + "\n" + "{stack_trace}" + + def __call__(self, model): + return self.FORMAT_STR.format( + thread_str=" Thread #{0} ".format(model.thread_id), + stack_trace=model.stack_trace + ) diff --git a/rack/openstack/common/report/views/xml/__init__.py b/rack/openstack/common/report/views/xml/__init__.py new file mode 100644 index 0000000..a40fec9 --- /dev/null +++ b/rack/openstack/common/report/views/xml/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Provides basic XML views + +This module provides several basic views which serialize +models into XML. +""" diff --git a/rack/openstack/common/report/views/xml/generic.py b/rack/openstack/common/report/views/xml/generic.py new file mode 100644 index 0000000..9db3b03 --- /dev/null +++ b/rack/openstack/common/report/views/xml/generic.py @@ -0,0 +1,85 @@ +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Provides generic XML views + +This modules defines several basic views for serializing +data to XML. Submodels that have already been serialized +as XML may have their string values marked with `__is_xml__ += True` using :class:`openstack.common.report.utils.StringWithAttrs` +(each of the classes within this module does this automatically, +and non-naive serializers check for this attribute and handle +such strings specially) +""" + +import collections as col +import copy +import xml.etree.ElementTree as ET + +import six + +import rack.openstack.common.report.utils as utils + + +class KeyValueView(object): + """A Key-Value XML View + + This view performs advanced serialization of a data model + into XML. It first deserializes any values marked as XML so + that they can be properly reserialized later. 
It then follows + the following rules to perform serialization: + + key : text/xml + The tag name is the key name, and the contents are the text or xml + key : Sequence + A wrapper tag is created with the key name, and each item is placed + in an 'item' tag + key : Mapping + A wrapper tag is created with the key name, and the serialize is called + on each key-value pair (such that each key gets its own tag) + + :param str wrapper_name: the name of the top-level element + """ + + def __init__(self, wrapper_name="model"): + self.wrapper_name = wrapper_name + + def __call__(self, model): + # this part deals with subviews that were already serialized + cpy = copy.deepcopy(model) + for key, valstr in model.items(): + if getattr(valstr, '__is_xml__', False): + cpy[key] = ET.fromstring(valstr) + + def serialize(rootmodel, rootkeyname): + res = ET.Element(rootkeyname) + + if isinstance(rootmodel, col.Mapping): + for key in rootmodel: + res.append(serialize(rootmodel[key], key)) + elif (isinstance(rootmodel, col.Sequence) + and not isinstance(rootmodel, six.string_types)): + for val in rootmodel: + res.append(serialize(val, 'item')) + elif ET.iselement(rootmodel): + res.append(rootmodel) + else: + res.text = str(rootmodel) + + return res + + res = utils.StringWithAttrs(ET.tostring(serialize(cpy, + self.wrapper_name))) + res.__is_xml__ = True + return res diff --git a/rack/openstack/common/service.py b/rack/openstack/common/service.py new file mode 100644 index 0000000..991c8b3 --- /dev/null +++ b/rack/openstack/common/service.py @@ -0,0 +1,491 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Generic Node base class for all workers that run on hosts.""" + +import errno +import logging as std_logging +import os +import random +import signal +import sys +import time + +try: + # Importing just the symbol here because the io module does not + # exist in Python 2.6. + from io import UnsupportedOperation # noqa +except ImportError: + # Python 2.6 + UnsupportedOperation = None + +import eventlet +from eventlet import event +from oslo.config import cfg + +from rack.openstack.common import eventlet_backdoor +from rack.openstack.common.gettextutils import _ # noqa +from rack.openstack.common import importutils +from rack.openstack.common import log as logging +from rack.openstack.common import threadgroup + + +rpc = importutils.try_import('rack.rpc') +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +def _sighup_supported(): + return hasattr(signal, 'SIGHUP') + + +def _is_daemon(): + # The process group for a foreground process will match the + # process group of the controlling terminal. If those values do + # not match, or ioctl() fails on the stdout file handle, we assume + # the process is running in the background as a daemon. 
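Returning to the XML KeyValueView defined above, a rough usage sketch (not part of the patch); a plain dict stands in for a report model here:

    from rack.openstack.common.report.views.xml.generic import KeyValueView

    view = KeyValueView(wrapper_name='report')
    print(view({'host': 'node-1', 'workers': ['api', 'scheduler']}))
    # Roughly: <report><host>node-1</host>
    #          <workers><item>api</item><item>scheduler</item></workers></report>
    # (element order follows dict iteration order)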
+ # http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics + try: + is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno()) + except OSError as err: + if err.errno == errno.ENOTTY: + # Assume we are a daemon because there is no terminal. + is_daemon = True + else: + raise + except UnsupportedOperation: + # Could not get the fileno for stdout, so we must be a daemon. + is_daemon = True + return is_daemon + + +def _is_sighup_and_daemon(signo): + if not (_sighup_supported() and signo == signal.SIGHUP): + # Avoid checking if we are a daemon, because the signal isn't + # SIGHUP. + return False + return _is_daemon() + + +def _signo_to_signame(signo): + signals = {signal.SIGTERM: 'SIGTERM', + signal.SIGINT: 'SIGINT'} + if _sighup_supported(): + signals[signal.SIGHUP] = 'SIGHUP' + return signals[signo] + + +def _set_signals_handler(handler): + signal.signal(signal.SIGTERM, handler) + signal.signal(signal.SIGINT, handler) + if _sighup_supported(): + signal.signal(signal.SIGHUP, handler) + + +class Launcher(object): + """Launch one or more services and wait for them to complete.""" + + def __init__(self): + """Initialize the service launcher. + + :returns: None + + """ + self.services = Services() + self.backdoor_port = eventlet_backdoor.initialize_if_enabled() + + def launch_service(self, service): + """Load and start the given service. + + :param service: The service you would like to start. + :returns: None + + """ + service.backdoor_port = self.backdoor_port + self.services.add(service) + + def stop(self): + """Stop all services which are currently running. + + :returns: None + + """ + self.services.stop() + + def wait(self): + """Waits until all services have been stopped, and then returns. + + :returns: None + + """ + self.services.wait() + + def restart(self): + """Reload config files and restart service. + + :returns: None + + """ + cfg.CONF.reload_config_files() + self.services.restart() + + +class SignalExit(SystemExit): + def __init__(self, signo, exccode=1): + super(SignalExit, self).__init__(exccode) + self.signo = signo + + +class ServiceLauncher(Launcher): + def _handle_signal(self, signo, frame): + # Allow the process to be killed again and die from natural causes + _set_signals_handler(signal.SIG_DFL) + raise SignalExit(signo) + + def handle_signal(self): + _set_signals_handler(self._handle_signal) + + def _wait_for_exit_or_signal(self, ready_callback=None): + status = None + signo = 0 + + LOG.debug(_('Full set of CONF:')) + CONF.log_opt_values(LOG, std_logging.DEBUG) + + try: + if ready_callback: + ready_callback() + super(ServiceLauncher, self).wait() + except SignalExit as exc: + signame = _signo_to_signame(exc.signo) + LOG.info(_('Caught %s, exiting'), signame) + status = exc.code + signo = exc.signo + except SystemExit as exc: + status = exc.code + finally: + self.stop() + if rpc: + try: + rpc.cleanup() + except Exception: + # We're shutting down, so it doesn't matter at this point. 
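As a consumer-side sketch (not part of the patch), a typical caller subclasses the Service class defined later in this module and hands it to the launch() helper; HeartbeatService and its behaviour are hypothetical:

    from rack.openstack.common import service


    class HeartbeatService(service.Service):
        """Hypothetical service that runs one background green thread."""

        def start(self):
            self.tg.add_thread(self._run)

        def _run(self):
            pass  # a real service would loop here until stopped


    launcher = service.launch(HeartbeatService())               # ServiceLauncher, in-process
    # launcher = service.launch(HeartbeatService(), workers=4)  # ProcessLauncher with forks
    launcher.wait()   # blocks until SIGTERM/SIGINT, honouring the signal handling above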
+ LOG.exception(_('Exception during rpc cleanup.')) + + return status, signo + + def wait(self, ready_callback=None): + while True: + self.handle_signal() + status, signo = self._wait_for_exit_or_signal(ready_callback) + if not _is_sighup_and_daemon(signo): + return status + self.restart() + + +class ServiceWrapper(object): + def __init__(self, service, workers): + self.service = service + self.workers = workers + self.children = set() + self.forktimes = [] + + +class ProcessLauncher(object): + def __init__(self): + self.children = {} + self.sigcaught = None + self.running = True + rfd, self.writepipe = os.pipe() + self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r') + self.handle_signal() + + def handle_signal(self): + _set_signals_handler(self._handle_signal) + + def _handle_signal(self, signo, frame): + self.sigcaught = signo + self.running = False + + # Allow the process to be killed again and die from natural causes + _set_signals_handler(signal.SIG_DFL) + + def _pipe_watcher(self): + # This will block until the write end is closed when the parent + # dies unexpectedly + self.readpipe.read() + + LOG.info(_('Parent process has died unexpectedly, exiting')) + + sys.exit(1) + + def _child_process_handle_signal(self): + # Setup child signal handlers differently + def _sigterm(*args): + signal.signal(signal.SIGTERM, signal.SIG_DFL) + raise SignalExit(signal.SIGTERM) + + def _sighup(*args): + signal.signal(signal.SIGHUP, signal.SIG_DFL) + raise SignalExit(signal.SIGHUP) + + signal.signal(signal.SIGTERM, _sigterm) + if _sighup_supported(): + signal.signal(signal.SIGHUP, _sighup) + # Block SIGINT and let the parent send us a SIGTERM + signal.signal(signal.SIGINT, signal.SIG_IGN) + + def _child_wait_for_exit_or_signal(self, launcher): + status = 0 + signo = 0 + + # NOTE(johannes): All exceptions are caught to ensure this + # doesn't fallback into the loop spawning children. It would + # be bad for a child to spawn more children. + try: + launcher.wait() + except SignalExit as exc: + signame = _signo_to_signame(exc.signo) + LOG.info(_('Caught %s, exiting'), signame) + status = exc.code + signo = exc.signo + except SystemExit as exc: + status = exc.code + except BaseException: + LOG.exception(_('Unhandled exception')) + status = 2 + finally: + launcher.stop() + + return status, signo + + def _child_process(self, service): + self._child_process_handle_signal() + + # Reopen the eventlet hub to make sure we don't share an epoll + # fd with parent and/or siblings, which would be bad + eventlet.hubs.use_hub() + + # Close write to ensure only parent has it open + os.close(self.writepipe) + # Create greenthread to watch for parent to close pipe + eventlet.spawn_n(self._pipe_watcher) + + # Reseed random number generator + random.seed() + + launcher = Launcher() + launcher.launch_service(service) + return launcher + + def _start_child(self, wrap): + if len(wrap.forktimes) > wrap.workers: + # Limit ourselves to one process a second (over the period of + # number of workers * 1 second). This will allow workers to + # start up quickly but ensure we don't fork off children that + # die instantly too quickly. 
+ if time.time() - wrap.forktimes[0] < wrap.workers: + LOG.info(_('Forking too fast, sleeping')) + time.sleep(1) + + wrap.forktimes.pop(0) + + wrap.forktimes.append(time.time()) + + pid = os.fork() + if pid == 0: + launcher = self._child_process(wrap.service) + while True: + self._child_process_handle_signal() + status, signo = self._child_wait_for_exit_or_signal(launcher) + if not _is_sighup_and_daemon(signo): + break + launcher.restart() + + os._exit(status) + + LOG.info(_('Started child %d'), pid) + + wrap.children.add(pid) + self.children[pid] = wrap + + return pid + + def launch_service(self, service, workers=1): + wrap = ServiceWrapper(service, workers) + + LOG.info(_('Starting %d workers'), wrap.workers) + while self.running and len(wrap.children) < wrap.workers: + self._start_child(wrap) + + def _wait_child(self): + try: + # Don't block if no child processes have exited + pid, status = os.waitpid(0, os.WNOHANG) + if not pid: + return None + except OSError as exc: + if exc.errno not in (errno.EINTR, errno.ECHILD): + raise + return None + + if os.WIFSIGNALED(status): + sig = os.WTERMSIG(status) + LOG.info(_('Child %(pid)d killed by signal %(sig)d'), + dict(pid=pid, sig=sig)) + else: + code = os.WEXITSTATUS(status) + LOG.info(_('Child %(pid)s exited with status %(code)d'), + dict(pid=pid, code=code)) + + if pid not in self.children: + LOG.warning(_('pid %d not in child list'), pid) + return None + + wrap = self.children.pop(pid) + wrap.children.remove(pid) + return wrap + + def _respawn_children(self): + while self.running: + wrap = self._wait_child() + if not wrap: + # Yield to other threads if no children have exited + # Sleep for a short time to avoid excessive CPU usage + # (see bug #1095346) + eventlet.greenthread.sleep(.01) + continue + while self.running and len(wrap.children) < wrap.workers: + self._start_child(wrap) + + def wait(self): + """Loop waiting on children to die and respawning as necessary.""" + + LOG.debug(_('Full set of CONF:')) + CONF.log_opt_values(LOG, std_logging.DEBUG) + + while True: + self.handle_signal() + self._respawn_children() + if self.sigcaught: + signame = _signo_to_signame(self.sigcaught) + LOG.info(_('Caught %s, stopping children'), signame) + if not _is_sighup_and_daemon(self.sigcaught): + break + + for pid in self.children: + os.kill(pid, signal.SIGHUP) + self.running = True + self.sigcaught = None + + for pid in self.children: + try: + os.kill(pid, signal.SIGTERM) + except OSError as exc: + if exc.errno != errno.ESRCH: + raise + + # Wait for children to die + if self.children: + LOG.info(_('Waiting on %d children to exit'), len(self.children)) + while self.children: + self._wait_child() + + +class Service(object): + """Service object for binaries running on hosts.""" + + def __init__(self, threads=1000): + self.tg = threadgroup.ThreadGroup(threads) + + # signal that the service is done shutting itself down: + self._done = event.Event() + + def reset(self): + # NOTE(Fengqian): docs for Event.reset() recommend against using it + self._done = event.Event() + + def start(self): + pass + + def stop(self): + self.tg.stop() + self.tg.wait() + # Signal that service cleanup is done: + if not self._done.ready(): + self._done.send() + + def wait(self): + self._done.wait() + + +class Services(object): + + def __init__(self): + self.services = [] + self.tg = threadgroup.ThreadGroup() + self.done = event.Event() + + def add(self, service): + self.services.append(service) + self.tg.add_thread(self.run_service, service, self.done) + + def stop(self): + # wait 
for graceful shutdown of services: + for service in self.services: + service.stop() + service.wait() + + # Each service has performed cleanup, now signal that the run_service + # wrapper threads can now die: + if not self.done.ready(): + self.done.send() + + # reap threads: + self.tg.stop() + + def wait(self): + self.tg.wait() + + def restart(self): + self.stop() + self.done = event.Event() + for restart_service in self.services: + restart_service.reset() + self.tg.add_thread(self.run_service, restart_service, self.done) + + @staticmethod + def run_service(service, done): + """Service start wrapper. + + :param service: service to run + :param done: event to wait on until a shutdown is triggered + :returns: None + + """ + service.start() + done.wait() + + +def launch(service, workers=None): + if workers: + launcher = ProcessLauncher() + launcher.launch_service(service, workers=workers) + else: + launcher = ServiceLauncher() + launcher.launch_service(service) + return launcher diff --git a/rack/openstack/common/sslutils.py b/rack/openstack/common/sslutils.py new file mode 100644 index 0000000..4f68ea5 --- /dev/null +++ b/rack/openstack/common/sslutils.py @@ -0,0 +1,98 @@ +# Copyright 2013 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os +import ssl + +from oslo.config import cfg + +from rack.openstack.common.gettextutils import _ + + +ssl_opts = [ + cfg.StrOpt('ca_file', + default=None, + help="CA certificate file to use to verify " + "connecting clients."), + cfg.StrOpt('cert_file', + default=None, + help="Certificate file to use when starting " + "the server securely."), + cfg.StrOpt('key_file', + default=None, + help="Private key file to use when starting " + "the server securely."), +] + + +CONF = cfg.CONF +CONF.register_opts(ssl_opts, "ssl") + + +def is_enabled(): + cert_file = CONF.ssl.cert_file + key_file = CONF.ssl.key_file + ca_file = CONF.ssl.ca_file + use_ssl = cert_file or key_file + + if cert_file and not os.path.exists(cert_file): + raise RuntimeError(_("Unable to find cert_file : %s") % cert_file) + + if ca_file and not os.path.exists(ca_file): + raise RuntimeError(_("Unable to find ca_file : %s") % ca_file) + + if key_file and not os.path.exists(key_file): + raise RuntimeError(_("Unable to find key_file : %s") % key_file) + + if use_ssl and (not cert_file or not key_file): + raise RuntimeError(_("When running server in SSL mode, you must " + "specify both a cert_file and key_file " + "option value in your configuration file")) + + return use_ssl + + +def wrap(sock): + ssl_kwargs = { + 'server_side': True, + 'certfile': CONF.ssl.cert_file, + 'keyfile': CONF.ssl.key_file, + 'cert_reqs': ssl.CERT_NONE, + } + + if CONF.ssl.ca_file: + ssl_kwargs['ca_certs'] = CONF.ssl.ca_file + ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED + + return ssl.wrap_socket(sock, **ssl_kwargs) + + +_SSL_PROTOCOLS = { + "tlsv1": ssl.PROTOCOL_TLSv1, + "sslv23": ssl.PROTOCOL_SSLv23, + "sslv3": ssl.PROTOCOL_SSLv3 +} + +try: + _SSL_PROTOCOLS["sslv2"] = ssl.PROTOCOL_SSLv2 +except AttributeError: 
+ pass + + +def validate_ssl_version(version): + key = version.lower() + try: + return _SSL_PROTOCOLS[key] + except KeyError: + raise RuntimeError(_("Invalid SSL version : %s") % version) diff --git a/rack/openstack/common/strutils.py b/rack/openstack/common/strutils.py new file mode 100644 index 0000000..5d908bf --- /dev/null +++ b/rack/openstack/common/strutils.py @@ -0,0 +1,216 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +System-level utilities and helper functions. +""" + +import re +import sys +import unicodedata + +from rack.openstack.common.gettextutils import _ + + +# Used for looking up extensions of text +# to their 'multiplied' byte amount +BYTE_MULTIPLIERS = { + '': 1, + 't': 1024 ** 4, + 'g': 1024 ** 3, + 'm': 1024 ** 2, + 'k': 1024, +} +BYTE_REGEX = re.compile(r'(^-?\d+)(\D*)') + +TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes') +FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no') + +SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]") +SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+") + + +def int_from_bool_as_string(subject): + """Interpret a string as a boolean and return either 1 or 0. + + Any string value in: + + ('True', 'true', 'On', 'on', '1') + + is interpreted as a boolean True. + + Useful for JSON-decoded stuff and config file parsing + """ + return bool_from_string(subject) and 1 or 0 + + +def bool_from_string(subject, strict=False): + """Interpret a string as a boolean. + + A case-insensitive match is performed such that strings matching 't', + 'true', 'on', 'y', 'yes', or '1' are considered True and, when + `strict=False`, anything else is considered False. + + Useful for JSON-decoded stuff and config file parsing. + + If `strict=True`, unrecognized values, including None, will raise a + ValueError which is useful when parsing values passed in from an API call. + Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'. + """ + if not isinstance(subject, basestring): + subject = str(subject) + + lowered = subject.strip().lower() + + if lowered in TRUE_STRINGS: + return True + elif lowered in FALSE_STRINGS: + return False + elif strict: + acceptable = ', '.join( + "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS)) + msg = _("Unrecognized value '%(val)s', acceptable values are:" + " %(acceptable)s") % {'val': subject, + 'acceptable': acceptable} + raise ValueError(msg) + else: + return False + + +def safe_decode(text, incoming=None, errors='strict'): + """Decodes incoming str using `incoming` if they're not already unicode. + + :param incoming: Text's current encoding + :param errors: Errors handling policy. See here for valid + values http://docs.python.org/2/library/codecs.html + :returns: text or a unicode `incoming` encoded + representation of it. 
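A quick illustration (not part of the patch) of the boolean parsing helpers above; values are matched case-insensitively against TRUE_STRINGS and FALSE_STRINGS:

    from rack.openstack.common import strutils

    strutils.bool_from_string('YES')        # True: 'yes' is in TRUE_STRINGS
    strutils.bool_from_string('banana')     # False: unrecognized and strict=False
    strutils.int_from_bool_as_string('on')  # 1
    try:
        strutils.bool_from_string('banana', strict=True)
    except ValueError:
        pass  # strict mode rejects anything outside TRUE_STRINGS/FALSE_STRINGS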
+ :raises TypeError: If text is not an isntance of basestring + """ + if not isinstance(text, basestring): + raise TypeError("%s can't be decoded" % type(text)) + + if isinstance(text, unicode): + return text + + if not incoming: + incoming = (sys.stdin.encoding or + sys.getdefaultencoding()) + + try: + return text.decode(incoming, errors) + except UnicodeDecodeError: + # Note(flaper87) If we get here, it means that + # sys.stdin.encoding / sys.getdefaultencoding + # didn't return a suitable encoding to decode + # text. This happens mostly when global LANG + # var is not set correctly and there's no + # default encoding. In this case, most likely + # python will use ASCII or ANSI encoders as + # default encodings but they won't be capable + # of decoding non-ASCII characters. + # + # Also, UTF-8 is being used since it's an ASCII + # extension. + return text.decode('utf-8', errors) + + +def safe_encode(text, incoming=None, + encoding='utf-8', errors='strict'): + """Encodes incoming str/unicode using `encoding`. + + If incoming is not specified, text is expected to be encoded with + current python's default encoding. (`sys.getdefaultencoding`) + + :param incoming: Text's current encoding + :param encoding: Expected encoding for text (Default UTF-8) + :param errors: Errors handling policy. See here for valid + values http://docs.python.org/2/library/codecs.html + :returns: text or a bytestring `encoding` encoded + representation of it. + :raises TypeError: If text is not an isntance of basestring + """ + if not isinstance(text, basestring): + raise TypeError("%s can't be encoded" % type(text)) + + if not incoming: + incoming = (sys.stdin.encoding or + sys.getdefaultencoding()) + + if isinstance(text, unicode): + return text.encode(encoding, errors) + elif text and encoding != incoming: + # Decode text before encoding it with `encoding` + text = safe_decode(text, incoming, errors) + return text.encode(encoding, errors) + + return text + + +def to_bytes(text, default=0): + """Converts a string into an integer of bytes. + + Looks at the last characters of the text to determine + what conversion is needed to turn the input text into a byte number. + Supports "B, K(B), M(B), G(B), and T(B)". (case insensitive) + + :param text: String input for bytes size conversion. + :param default: Default return value when text is blank. + + """ + match = BYTE_REGEX.search(text) + if match: + magnitude = int(match.group(1)) + mult_key_org = match.group(2) + if not mult_key_org: + return magnitude + elif text: + msg = _('Invalid string format: %s') % text + raise TypeError(msg) + else: + return default + mult_key = mult_key_org.lower().replace('b', '', 1) + multiplier = BYTE_MULTIPLIERS.get(mult_key) + if multiplier is None: + msg = _('Unknown byte multiplier: %s') % mult_key_org + raise TypeError(msg) + return magnitude * multiplier + + +def to_slug(value, incoming=None, errors="strict"): + """Normalize string. + + Convert to lowercase, remove non-word characters, and convert spaces + to hyphens. + + Inspired by Django's `slugify` filter. + + :param value: Text to slugify + :param incoming: Text's current encoding + :param errors: Errors handling policy. 
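A quick illustration (not part of the patch) of the byte-size parsing above:

    from rack.openstack.common import strutils

    strutils.to_bytes('10K')             # 10240: 'k' maps to 1024 in BYTE_MULTIPLIERS
    strutils.to_bytes('2MB')             # 2097152: a trailing 'b' is stripped first
    strutils.to_bytes('1024')            # 1024: no suffix means plain bytes
    strutils.to_bytes('', default=512)   # 512: blank input falls back to the default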
See here for valid + values http://docs.python.org/2/library/codecs.html + :returns: slugified unicode representation of `value` + :raises TypeError: If text is not an instance of basestring + """ + value = safe_decode(value, incoming, errors) + # NOTE(aababilov): no need to use safe_(encode|decode) here: + # encodings are always "ascii", error handling is always "ignore" + # and types are always known (first: unicode; second: str) + value = unicodedata.normalize("NFKD", value).encode( + "ascii", "ignore").decode("ascii") + value = SLUGIFY_STRIP_RE.sub("", value).strip().lower() + return SLUGIFY_HYPHENATE_RE.sub("-", value) diff --git a/rack/openstack/common/threadgroup.py b/rack/openstack/common/threadgroup.py new file mode 100644 index 0000000..f185f31 --- /dev/null +++ b/rack/openstack/common/threadgroup.py @@ -0,0 +1,121 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from eventlet import greenlet +from eventlet import greenpool +from eventlet import greenthread + +from rack.openstack.common import log as logging +from rack.openstack.common import loopingcall + + +LOG = logging.getLogger(__name__) + + +def _thread_done(gt, *args, **kwargs): + """Callback function to be passed to GreenThread.link() when we spawn() + Calls the :class:`ThreadGroup` to notify it. + + """ + kwargs['group'].thread_done(kwargs['thread']) + + +class Thread(object): + """Wrapper around a greenthread, that holds a reference to the + :class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup` when + it has finished so it can be removed from the threads list. + """ + def __init__(self, thread, group): + self.thread = thread + self.thread.link(_thread_done, group=group, thread=self) + + def stop(self): + self.thread.kill() + + def wait(self): + return self.thread.wait() + + +class ThreadGroup(object): + """The point of the ThreadGroup class is to: + + * keep track of timers and greenthreads (making it easier to stop them + when need be). + * provide an easy API to add timers.
+ """ + def __init__(self, thread_pool_size=10): + self.pool = greenpool.GreenPool(thread_pool_size) + self.threads = [] + self.timers = [] + + def add_dynamic_timer(self, callback, initial_delay=None, + periodic_interval_max=None, *args, **kwargs): + timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs) + timer.start(initial_delay=initial_delay, + periodic_interval_max=periodic_interval_max) + self.timers.append(timer) + + def add_timer(self, interval, callback, initial_delay=None, + *args, **kwargs): + pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs) + pulse.start(interval=interval, + initial_delay=initial_delay) + self.timers.append(pulse) + + def add_thread(self, callback, *args, **kwargs): + gt = self.pool.spawn(callback, *args, **kwargs) + th = Thread(gt, self) + self.threads.append(th) + + def thread_done(self, thread): + self.threads.remove(thread) + + def stop(self): + current = greenthread.getcurrent() + for x in self.threads: + if x is current: + # don't kill the current thread. + continue + try: + x.stop() + except Exception as ex: + LOG.exception(ex) + + for x in self.timers: + try: + x.stop() + except Exception as ex: + LOG.exception(ex) + self.timers = [] + + def wait(self): + for x in self.timers: + try: + x.wait() + except greenlet.GreenletExit: + pass + except Exception as ex: + LOG.exception(ex) + current = greenthread.getcurrent() + for x in self.threads: + if x is current: + continue + try: + x.wait() + except greenlet.GreenletExit: + pass + except Exception as ex: + LOG.exception(ex) diff --git a/rack/openstack/common/timeutils.py b/rack/openstack/common/timeutils.py new file mode 100644 index 0000000..d8cf539 --- /dev/null +++ b/rack/openstack/common/timeutils.py @@ -0,0 +1,210 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Time related utilities and helper functions. 
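Stepping back to the ThreadGroup class above, a usage sketch (not part of the patch), assuming the eventlet-based environment the rest of this tree runs in; report_state is a made-up callback:

    from rack.openstack.common import threadgroup


    def report_state():
        pass  # e.g. a periodic service heartbeat


    tg = threadgroup.ThreadGroup()
    tg.add_timer(60, report_state)   # FixedIntervalLoopingCall every 60 seconds
    tg.add_thread(report_state)      # one-off green thread from the pool
    # ... on shutdown:
    tg.stop()
    tg.wait()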
+""" + +import calendar +import datetime +import time + +import iso8601 +import six + + +# ISO 8601 extended time format with microseconds +_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f' +_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S' +PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND + + +def isotime(at=None, subsecond=False): + """Stringify time in ISO 8601 format.""" + if not at: + at = utcnow() + st = at.strftime(_ISO8601_TIME_FORMAT + if not subsecond + else _ISO8601_TIME_FORMAT_SUBSECOND) + tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC' + st += ('Z' if tz == 'UTC' else tz) + return st + + +def parse_isotime(timestr): + """Parse time from ISO 8601 format.""" + try: + return iso8601.parse_date(timestr) + except iso8601.ParseError as e: + raise ValueError(six.text_type(e)) + except TypeError as e: + raise ValueError(six.text_type(e)) + + +def strtime(at=None, fmt=PERFECT_TIME_FORMAT): + """Returns formatted utcnow.""" + if not at: + at = utcnow() + return at.strftime(fmt) + + +def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT): + """Turn a formatted time back into a datetime.""" + return datetime.datetime.strptime(timestr, fmt) + + +def normalize_time(timestamp): + """Normalize time in arbitrary timezone to UTC naive object.""" + offset = timestamp.utcoffset() + if offset is None: + return timestamp + return timestamp.replace(tzinfo=None) - offset + + +def is_older_than(before, seconds): + """Return True if before is older than seconds.""" + if isinstance(before, six.string_types): + before = parse_strtime(before).replace(tzinfo=None) + else: + before = before.replace(tzinfo=None) + + return utcnow() - before > datetime.timedelta(seconds=seconds) + + +def is_newer_than(after, seconds): + """Return True if after is newer than seconds.""" + if isinstance(after, six.string_types): + after = parse_strtime(after).replace(tzinfo=None) + else: + after = after.replace(tzinfo=None) + + return after - utcnow() > datetime.timedelta(seconds=seconds) + + +def utcnow_ts(): + """Timestamp version of our utcnow function.""" + if utcnow.override_time is None: + # NOTE(kgriffs): This is several times faster + # than going through calendar.timegm(...) + return int(time.time()) + + return calendar.timegm(utcnow().timetuple()) + + +def utcnow(): + """Overridable version of utils.utcnow.""" + if utcnow.override_time: + try: + return utcnow.override_time.pop(0) + except AttributeError: + return utcnow.override_time + return datetime.datetime.utcnow() + + +def iso8601_from_timestamp(timestamp): + """Returns a iso8601 formated date from timestamp.""" + return isotime(datetime.datetime.utcfromtimestamp(timestamp)) + + +utcnow.override_time = None + + +def set_time_override(override_time=None): + """Overrides utils.utcnow. + + Make it return a constant time or a list thereof, one at a time. + + :param override_time: datetime instance or list thereof. If not + given, defaults to the current UTC time. 
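A quick illustration (not part of the patch) of the overridable clock above, as it is typically used in tests (advance_time_seconds and clear_time_override are defined just below); the frozen timestamp is arbitrary:

    import datetime

    from rack.openstack.common import timeutils

    frozen = datetime.datetime(2014, 7, 9, 11, 11, 35)
    timeutils.set_time_override(frozen)
    print(timeutils.isotime())                  # 2014-07-09T11:11:35Z
    print(timeutils.is_older_than(frozen, 1))   # False: no time has passed yet
    timeutils.advance_time_seconds(2)
    print(timeutils.is_older_than(frozen, 1))   # True after advancing the clock
    timeutils.clear_time_override()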
+ """ + utcnow.override_time = override_time or datetime.datetime.utcnow() + + +def advance_time_delta(timedelta): + """Advance overridden time using a datetime.timedelta.""" + assert(not utcnow.override_time is None) + try: + for dt in utcnow.override_time: + dt += timedelta + except TypeError: + utcnow.override_time += timedelta + + +def advance_time_seconds(seconds): + """Advance overridden time by seconds.""" + advance_time_delta(datetime.timedelta(0, seconds)) + + +def clear_time_override(): + """Remove the overridden time.""" + utcnow.override_time = None + + +def marshall_now(now=None): + """Make an rpc-safe datetime with microseconds. + + Note: tzinfo is stripped, but not required for relative times. + """ + if not now: + now = utcnow() + return dict(day=now.day, month=now.month, year=now.year, hour=now.hour, + minute=now.minute, second=now.second, + microsecond=now.microsecond) + + +def unmarshall_time(tyme): + """Unmarshall a datetime dict.""" + return datetime.datetime(day=tyme['day'], + month=tyme['month'], + year=tyme['year'], + hour=tyme['hour'], + minute=tyme['minute'], + second=tyme['second'], + microsecond=tyme['microsecond']) + + +def delta_seconds(before, after): + """Return the difference between two timing objects. + + Compute the difference in seconds between two date, time, or + datetime objects (as a float, to microsecond resolution). + """ + delta = after - before + return total_seconds(delta) + + +def total_seconds(delta): + """Return the total seconds of datetime.timedelta object. + + Compute total seconds of datetime.timedelta, datetime.timedelta + doesn't have method total_seconds in Python2.6, calculate it manually. + """ + try: + return delta.total_seconds() + except AttributeError: + return ((delta.days * 24 * 3600) + delta.seconds + + float(delta.microseconds) / (10 ** 6)) + + +def is_soon(dt, window): + """Determines if time is going to happen in the next window seconds. + + :params dt: the time + :params window: minimum seconds to remain to consider the time not soon + + :return: True if expiration is within the given duration + """ + soon = (utcnow() + datetime.timedelta(seconds=window)) + return normalize_time(dt) <= soon diff --git a/rack/openstack/common/units.py b/rack/openstack/common/units.py new file mode 100644 index 0000000..84b518c --- /dev/null +++ b/rack/openstack/common/units.py @@ -0,0 +1,38 @@ +# Copyright 2013 IBM Corp +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Unit constants +""" + +#Binary unit constants. +Ki = 1024 +Mi = 1024 ** 2 +Gi = 1024 ** 3 +Ti = 1024 ** 4 +Pi = 1024 ** 5 +Ei = 1024 ** 6 +Zi = 1024 ** 7 +Yi = 1024 ** 8 + +#Decimal unit constants. +k = 1000 +M = 1000 ** 2 +G = 1000 ** 3 +T = 1000 ** 4 +P = 1000 ** 5 +E = 1000 ** 6 +Z = 1000 ** 7 +Y = 1000 ** 8 diff --git a/rack/openstack/common/uuidutils.py b/rack/openstack/common/uuidutils.py new file mode 100644 index 0000000..234b880 --- /dev/null +++ b/rack/openstack/common/uuidutils.py @@ -0,0 +1,37 @@ +# Copyright (c) 2012 Intel Corporation. 
+# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +UUID related utilities and helper functions. +""" + +import uuid + + +def generate_uuid(): + return str(uuid.uuid4()) + + +def is_uuid_like(val): + """Returns validation of a value as a UUID. + + For our purposes, a UUID is a canonical form string: + aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa + + """ + try: + return str(uuid.UUID(val)) == val + except (TypeError, ValueError, AttributeError): + return False diff --git a/rack/openstack/common/versionutils.py b/rack/openstack/common/versionutils.py new file mode 100644 index 0000000..f7b1f8a --- /dev/null +++ b/rack/openstack/common/versionutils.py @@ -0,0 +1,45 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Helpers for comparing version strings. +""" + +import pkg_resources + + +def is_compatible(requested_version, current_version, same_major=True): + """Determine whether `requested_version` is satisfied by + `current_version`; in other words, `current_version` is >= + `requested_version`. + + :param requested_version: version to check for compatibility + :param current_version: version to check against + :param same_major: if True, the major version must be identical between + `requested_version` and `current_version`. This is used when a + major-version difference indicates incompatibility between the two + versions. Since this is the common-case in practice, the default is + True. + :returns: True if compatible, False if not + """ + requested_parts = pkg_resources.parse_version(requested_version) + current_parts = pkg_resources.parse_version(current_version) + + if same_major and (requested_parts[0] != current_parts[0]): + return False + + return current_parts >= requested_parts diff --git a/rack/openstack/common/xmlutils.py b/rack/openstack/common/xmlutils.py new file mode 100644 index 0000000..b131d3e --- /dev/null +++ b/rack/openstack/common/xmlutils.py @@ -0,0 +1,74 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
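A quick illustration (not part of the patch) of the two helpers above; note that is_compatible indexes the tuples returned by the pkg_resources.parse_version of this era:

    from rack.openstack.common import uuidutils
    from rack.openstack.common import versionutils

    uuidutils.is_uuid_like(uuidutils.generate_uuid())   # True
    uuidutils.is_uuid_like('not-a-uuid')                # False

    versionutils.is_compatible('1.1', '1.3')   # True: same major and 1.3 >= 1.1
    versionutils.is_compatible('2.0', '1.9')   # False: major versions differ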
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from xml.dom import minidom +from xml.parsers import expat +from xml import sax +from xml.sax import expatreader + + +class ProtectedExpatParser(expatreader.ExpatParser): + """An expat parser which disables DTD's and entities by default.""" + + def __init__(self, forbid_dtd=True, forbid_entities=True, + *args, **kwargs): + # Python 2.x old style class + expatreader.ExpatParser.__init__(self, *args, **kwargs) + self.forbid_dtd = forbid_dtd + self.forbid_entities = forbid_entities + + def start_doctype_decl(self, name, sysid, pubid, has_internal_subset): + raise ValueError("Inline DTD forbidden") + + def entity_decl(self, entityName, is_parameter_entity, value, base, + systemId, publicId, notationName): + raise ValueError(" entity declaration forbidden") + + def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name): + # expat 1.2 + raise ValueError(" unparsed entity forbidden") + + def external_entity_ref(self, context, base, systemId, publicId): + raise ValueError(" external entity forbidden") + + def notation_decl(self, name, base, sysid, pubid): + raise ValueError(" notation forbidden") + + def reset(self): + expatreader.ExpatParser.reset(self) + if self.forbid_dtd: + self._parser.StartDoctypeDeclHandler = self.start_doctype_decl + self._parser.EndDoctypeDeclHandler = None + if self.forbid_entities: + self._parser.EntityDeclHandler = self.entity_decl + self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl + self._parser.ExternalEntityRefHandler = self.external_entity_ref + self._parser.NotationDeclHandler = self.notation_decl + try: + self._parser.SkippedEntityHandler = None + except AttributeError: + # some pyexpat versions do not support SkippedEntity + pass + + +def safe_minidom_parse_string(xml_string): + """Parse an XML string using minidom safely. + + """ + try: + return minidom.parseString(xml_string, parser=ProtectedExpatParser()) + except sax.SAXParseException: + raise expat.ExpatError() diff --git a/rack/paths.py b/rack/paths.py new file mode 100644 index 0000000..794ba7b --- /dev/null +++ b/rack/paths.py @@ -0,0 +1,64 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
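A quick illustration (not part of the patch) of the hardened parser above; the sample documents are made up:

    from xml.parsers import expat

    from rack.openstack.common import xmlutils

    doc = xmlutils.safe_minidom_parse_string('<group><name>rack</name></group>')
    print(doc.firstChild.tagName)   # group

    evil = '<!DOCTYPE d [<!ENTITY x "boom">]><d>&x;</d>'
    try:
        xmlutils.safe_minidom_parse_string(evil)
    except (ValueError, expat.ExpatError):
        pass  # inline DTDs and entity declarations are rejected, not expanded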
+ +import os +import sys + +from oslo.config import cfg + +path_opts = [ + cfg.StrOpt('pybasedir', + default=os.path.abspath(os.path.join(os.path.dirname(__file__), + '../')), + help='Directory where the rack python module is installed'), + cfg.StrOpt('bindir', + default=os.path.join(sys.prefix, 'local', 'bin'), + help='Directory where rack binaries are installed'), + cfg.StrOpt('state_path', + default='$pybasedir', + help="Top-level directory for maintaining rack's state"), +] + +CONF = cfg.CONF +CONF.register_opts(path_opts) + + +def basedir_def(*args): + """Return an uninterpolated path relative to $pybasedir.""" + return os.path.join('$pybasedir', *args) + + +def bindir_def(*args): + """Return an uninterpolated path relative to $bindir.""" + return os.path.join('$bindir', *args) + + +def state_path_def(*args): + """Return an uninterpolated path relative to $state_path.""" + return os.path.join('$state_path', *args) + + +def basedir_rel(*args): + """Return a path relative to $pybasedir.""" + return os.path.join(CONF.pybasedir, *args) + + +def bindir_rel(*args): + """Return a path relative to $bindir.""" + return os.path.join(CONF.bindir, *args) + + +def state_path_rel(*args): + """Return a path relative to $state_path.""" + return os.path.join(CONF.state_path, *args) diff --git a/rack/policy.py b/rack/policy.py new file mode 100644 index 0000000..e7009a4 --- /dev/null +++ b/rack/policy.py @@ -0,0 +1,134 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Policy Engine For Nova.""" + +import os.path + +from oslo.config import cfg + +from rack import exception +from rack.openstack.common.gettextutils import _ +from rack.openstack.common import policy +from rack import utils + + +policy_opts = [ + cfg.StrOpt('policy_file', + default='policy.json', + help=_('JSON file representing policy')), + cfg.StrOpt('policy_default_rule', + default='default', + help=_('Rule checked when requested rule is not found')), + ] + +CONF = cfg.CONF +CONF.register_opts(policy_opts) + +_POLICY_PATH = None +_POLICY_CACHE = {} + + +def reset(): + global _POLICY_PATH + global _POLICY_CACHE + _POLICY_PATH = None + _POLICY_CACHE = {} + policy.reset() + + +def init(): + global _POLICY_PATH + global _POLICY_CACHE + if not _POLICY_PATH: + _POLICY_PATH = CONF.policy_file + if not os.path.exists(_POLICY_PATH): + _POLICY_PATH = CONF.find_file(_POLICY_PATH) + if not _POLICY_PATH: + raise exception.ConfigNotFound(path=CONF.policy_file) + utils.read_cached_file(_POLICY_PATH, _POLICY_CACHE, + reload_func=_set_rules) + + +def _set_rules(data): + default_rule = CONF.policy_default_rule + policy.set_rules(policy.Rules.load_json(data, default_rule)) + + +def enforce(context, action, target, do_raise=True): + """Verifies that the action is valid on the target in this context. + + :param context: rack context + :param action: string representing the action to be checked + this should be colon separated for clarity. + i.e. 
``compute:create_instance``, + ``compute:attach_volume``, + ``volume:attach_volume`` + :param target: dictionary representing the object of the action + for object creation this should be a dictionary representing the + location of the object e.g. ``{'project_id': context.project_id}`` + :param do_raise: if True (the default), raises PolicyNotAuthorized; + if False, returns False + + :raises rack.exception.PolicyNotAuthorized: if verification fails + and do_raise is True. + + :return: returns a non-False value (not necessarily "True") if + authorized, and the exact value False if not authorized and + do_raise is False. + """ + init() + + credentials = context.to_dict() + + # Add the exception arguments if asked to do a raise + extra = {} + if do_raise: + extra.update(exc=exception.PolicyNotAuthorized, action=action) + + return policy.check(action, target, credentials, **extra) + + +def check_is_admin(context): + """Whether or not roles contains 'admin' role according to policy setting. + + """ + init() + + #the target is user-self + credentials = context.to_dict() + target = credentials + + return policy.check('context_is_admin', target, credentials) + + +@policy.register('is_admin') +class IsAdminCheck(policy.Check): + """An explicit check for is_admin.""" + + def __init__(self, kind, match): + """Initialize the check.""" + + self.expected = (match.lower() == 'true') + + super(IsAdminCheck, self).__init__(kind, str(self.expected)) + + def __call__(self, target, creds): + """Determine whether is_admin matches the requested value.""" + + return creds['is_admin'] == self.expected + + +def get_rules(): + return policy._rules diff --git a/rack/resourceoperator/__init__.py b/rack/resourceoperator/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/rack/resourceoperator/manager.py b/rack/resourceoperator/manager.py new file mode 100644 index 0000000..f535888 --- /dev/null +++ b/rack/resourceoperator/manager.py @@ -0,0 +1,160 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
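+# Rough usage sketch (illustrative only). In this patch the manager is driven
+# by RPC casts from rack.resourceoperator.rpcapi.ResourceOperatorAPI and is
+# instantiated by rack.service.Service; the direct calls, ids and the host
+# kwarg below are made-up examples:
+#
+#     manager = ResourceOperatorManager(host='operator-node-1')
+#     manager.keypair_create(ctxt, gid='gid-1', keypair_id='kp-1', name='mykey')
+#     manager.keypair_delete(ctxt, nova_keypair_id='mykey')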
+""" +ResourceOperator Service +""" + +from rack import db +from oslo import messaging + +from rack import exception +from rack import manager +from rack.openstack.common import log as logging + +from rack.resourceoperator.openstack import networks +from rack.resourceoperator.openstack import keypairs +from rack.resourceoperator.openstack import securitygroups +from rack.resourceoperator.openstack import processes + + +LOG = logging.getLogger(__name__) + + +class ResourceOperatorManager(manager.Manager): + + target = messaging.Target(version='1.0') + + def __init__(self, scheduler_driver=None, *args, **kwargs): + super(ResourceOperatorManager, self).__init__( + service_name='resourceoperator', + *args, **kwargs) + self.network_client = networks.NetworkAPI() + self.keypair_client = keypairs.KeypairAPI() + self.securitygroup_client = securitygroups.SecuritygroupAPI() + self.securitygrouprule_client = securitygroups.SecuritygroupruleAPI() + self.process_client = processes.ProcessesAPI() + + def network_create(self, context, network): + update_values = {} + try: + neutron_network_id = self.network_client.network_create( + network.get("display_name"), + network.get("subnet"), + network.get("gateway"), + network.get("dns_nameservers"), + network.get("ext_router")) + update_values["neutron_network_id"] = neutron_network_id + update_values["status"] = "ACTIVE" + except Exception as e: + LOG.exception(e) + update_values["status"] = "ERROR" + try: + db.network_update(context, network["network_id"], update_values) + except Exception as e: + LOG.exception(e) + + def network_delete(self, context, neutron_network_id, ext_router): + try: + self.network_client.network_delete(neutron_network_id, ext_router) + except Exception as e: + LOG.exception(e) + + def keypair_create(self, context, gid, keypair_id, name): + try: + values = self.keypair_client.keypair_create(name) + values["status"] = "ACTIVE" + except Exception as e: + LOG.exception(e) + values = {"status": "ERROR"} + try: + db.keypair_update(context, gid, keypair_id, values) + except Exception as e: + LOG.exception(e) + + def keypair_delete(self, context, nova_keypair_id): + try: + self.keypair_client.keypair_delete(nova_keypair_id) + except (exception.KeypairDeleteFailed, + exception.InvalidOpenStackCredential) as e: + LOG.exception(e) + except Exception as e: + LOG.exception(e) + + + def securitygroup_create(self, context, gid, securitygroup_id, name, securitygrouprules): + values = {} + try: + values["neutron_securitygroup_id"] =\ + self.securitygroup_client.securitygroup_create(name) + for securitygrouprule in securitygrouprules: + self.securitygrouprule_client.securitygrouprule_create( + neutron_securitygroup_id=values["neutron_securitygroup_id"], + protocol=securitygrouprule.get("protocol"), + port_range_min=securitygrouprule.get("port_range_min"), + port_range_max=securitygrouprule.get("port_range_max"), + remote_neutron_securitygroup_id=securitygrouprule.get("remote_neutron_securitygroup_id"), + remote_ip_prefix=securitygrouprule.get("remote_ip_prefix") + ) + values["status"] = "ACTIVE" + db.securitygroup_update(context, gid, securitygroup_id, values) + except Exception as e: + values["status"] = "ERROR" + db.securitygroup_update(context, gid, securitygroup_id, values) + LOG.exception(e) + + def securitygroup_delete(self, context, neutron_securitygroup_id): + try: + self.securitygroup_client.securitygroup_delete(neutron_securitygroup_id) + except Exception as e: + LOG.exception(e) + + def process_create(self, + context, + pid, + ppid, + gid, 
+ name, + glance_image_id, + nova_flavor_id, + nova_keypair_id, + neutron_securitygroup_ids, + neutron_network_ids, + metadata + ): + update_values = {} + try: + metadata["pid"] = pid + metadata["ppid"] = ppid + metadata["gid"] = gid + nova_instance_id = self.process_client.process_create(name, + glance_image_id, + nova_flavor_id, + nova_keypair_id, + neutron_securitygroup_ids, + neutron_network_ids, + metadata) + update_values["nova_instance_id"] = nova_instance_id + update_values["status"] = "ACTIVE" + db.process_update(context, gid, pid, update_values) + except Exception as e: + update_values["status"] = "ERROR" + db.process_update(context, gid, pid, update_values) + LOG.exception(e) + + def process_delete(self, context, nova_instance_id): + try: + self.process_client.process_delete(nova_instance_id) + except Exception as e: + LOG.exception(e) + diff --git a/rack/resourceoperator/openstack/__init__.py b/rack/resourceoperator/openstack/__init__.py new file mode 100644 index 0000000..a6d0767 --- /dev/null +++ b/rack/resourceoperator/openstack/__init__.py @@ -0,0 +1,65 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from oslo.config import cfg +from novaclient.v1_1 import client as nova_client +from neutronclient.v2_0 import client as neutron_client + +from rack import exception +from rack.openstack.common import log as logging + +openstack_client_opts = [ + cfg.StrOpt('os_username', + help='Valid username for OpenStack'), + cfg.StrOpt('os_password', + help='Valid password for OpenStack'), + cfg.StrOpt('os_tenant_name', + help='Valid tenant name for OpenStack'), + cfg.StrOpt('os_auth_url', + help='The keystone endpoint') +] + +CONF = cfg.CONF +CONF.register_opts(openstack_client_opts) + +LOG = logging.getLogger(__name__) + + +def get_nova_client(): + credentials = { + "username": CONF.os_username, + "api_key": CONF.os_password, + "project_id": CONF.os_tenant_name, + "auth_url": CONF.os_auth_url + } + + for key, value in credentials.items(): + if not value: + raise exception.InvalidOpenStackCredential(credential=key) + + return nova_client.Client(**credentials) + + +def get_neutron_client(): + credentials = { + "username": CONF.os_username, + "password": CONF.os_password, + "tenant_name": CONF.os_tenant_name, + "auth_url": CONF.os_auth_url + } + + for key, value in credentials.items(): + if not value: + raise exception.InvalidOpenStackCredential(credential=key) + + return neutron_client.Client(**credentials) diff --git a/rack/resourceoperator/openstack/keypairs.py b/rack/resourceoperator/openstack/keypairs.py new file mode 100644 index 0000000..7cfc84a --- /dev/null +++ b/rack/resourceoperator/openstack/keypairs.py @@ -0,0 +1,47 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from oslo.config import cfg + +from rack import exception +from rack.openstack.common.gettextutils import _ +from rack.openstack.common import log as logging +from rack.resourceoperator import openstack as os_client + +LOG = logging.getLogger(__name__) + + +class KeypairAPI(object): + def __init__(self): + super(KeypairAPI, self).__init__() + + def keypair_create(self, name): + nova = os_client.get_nova_client() + try: + keypair = nova.keypairs.create(name) + except Exception as e: + LOG.exception(e) + raise exception.KeypairCreateFailed() + + values = {} + values["nova_keypair_id"] = keypair.name + values["private_key"] = keypair.private_key + return values + + def keypair_delete(self, nova_keypair_id): + nova = os_client.get_nova_client() + try: + nova.keypairs.delete(nova_keypair_id) + except Exception as e: + LOG.exception(e) + raise exception.KeypairDeleteFailed() diff --git a/rack/resourceoperator/openstack/networks.py b/rack/resourceoperator/openstack/networks.py new file mode 100644 index 0000000..9b4ded2 --- /dev/null +++ b/rack/resourceoperator/openstack/networks.py @@ -0,0 +1,68 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
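+# Usage sketch (illustrative only; the network name and CIDR are arbitrary):
+#
+#     api = NetworkAPI()
+#     net_id = api.network_create("demo-net", "10.0.0.0/24",
+#                                 gateway="10.0.0.1",
+#                                 dns_nameservers=["8.8.8.8"])
+#     api.network_delete(net_id)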
+from rack import exception +from rack.resourceoperator import openstack as os_client +from rack.openstack.common import log as logging +from rack.openstack.common.gettextutils import _ + + +LOG = logging.getLogger(__name__) + + +class NetworkAPI(object): + + def network_create(self, name, cidr, gateway=None, dns_nameservers=None, ext_router=None): + neutron = os_client.get_neutron_client() + + try: + create_network_body = {"network": {"name": name}} + network = neutron.create_network(create_network_body)["network"] + + create_subnet_body = { + "subnet":{ + "network_id": network["id"], + "ip_version": 4, + "cidr": cidr} + } + if gateway: + create_subnet_body["subnet"]["gateway_ip"] = gateway + if dns_nameservers: + create_subnet_body["subnet"]["dns_nameservers"] = dns_nameservers + subnet = neutron.create_subnet(create_subnet_body)["subnet"] + + if ext_router: + add_interface_router_body = {"subnet_id": subnet["id"]} + neutron.add_interface_router(ext_router, add_interface_router_body) + + except Exception as e: + LOG.exception(e) + raise exception.NetworkCreateFailed() + + return network["id"] + + def network_delete(self, neutron_network_id, ext_router=None): + neutron = os_client.get_neutron_client() + + try: + if ext_router: + network = neutron.show_network(neutron_network_id)["network"] + subnets = network["subnets"] + for subnet in subnets: + neutron.remove_interface_router(ext_router, {"subnet_id": subnet}) + + neutron.delete_network(neutron_network_id) + + except Exception as e: + LOG.exception(e) + raise exception.NetworkDeleteFailed() diff --git a/rack/resourceoperator/openstack/processes.py b/rack/resourceoperator/openstack/processes.py new file mode 100644 index 0000000..58baca9 --- /dev/null +++ b/rack/resourceoperator/openstack/processes.py @@ -0,0 +1,64 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
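+# Usage sketch (illustrative only; every id below is a placeholder for a real
+# Glance/Nova/Neutron id):
+#
+#     api = ProcessesAPI()
+#     nova_instance_id = api.process_create(
+#         "proc-1", "image-id", "flavor-id", "keypair-name",
+#         neutron_securitygroup_ids=["sg-id"],
+#         neutron_network_ids=["net-id"],
+#         metadata={"pid": "pid-1", "ppid": "ppid-1", "gid": "gid-1"})
+#     api.process_delete(nova_instance_id)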
+from rack import exception +from rack.openstack.common import log as logging +from rack.resourceoperator import openstack as os_client +import time + + +LOG = logging.getLogger(__name__) + + +class ProcessesAPI(object): + def process_create(self, + display_name, + glance_image_id, + nova_flavor_id, + nova_keypair_id, + neutron_securitygroup_ids, + neutron_network_ids, + metadata + ): + try: + nova = os_client.get_nova_client() + nics = [] + for network_id in neutron_network_ids: + nics.append({"net-id": network_id}) + process = nova.servers.create(name=display_name, + image=glance_image_id, + flavor=nova_flavor_id, + meta=metadata, + nics=nics, + key_name=nova_keypair_id, + security_groups=neutron_securitygroup_ids + ) + + while process.status != "ACTIVE": + if process.status == "ERROR": + raise Exception() + time.sleep(5) + process = nova.servers.get(process.id) + + return process.id + except Exception as e: + LOG.exception(e) + raise exception.ProcessCreateFailed() + + def process_delete(self, nova_instance_id): + try: + nova = os_client.get_nova_client() + nova.servers.delete(nova_instance_id) + except Exception as e: + LOG.exception(e) + raise exception.ProcessDeleteFailed() diff --git a/rack/resourceoperator/openstack/securitygroups.py b/rack/resourceoperator/openstack/securitygroups.py new file mode 100644 index 0000000..6e08ac3 --- /dev/null +++ b/rack/resourceoperator/openstack/securitygroups.py @@ -0,0 +1,73 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
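+# Usage sketch (illustrative only; creates a group, opens TCP/22 from
+# anywhere, then removes the group):
+#
+#     sg_api = SecuritygroupAPI()
+#     rule_api = SecuritygroupruleAPI()
+#     sg_id = sg_api.securitygroup_create("demo-sg")
+#     rule_api.securitygrouprule_create(sg_id, "tcp",
+#                                       port_range_min=22, port_range_max=22,
+#                                       remote_ip_prefix="0.0.0.0/0")
+#     sg_api.securitygroup_delete(sg_id)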
+from rack import exception
+from rack.openstack.common import log as logging
+from rack.resourceoperator import openstack as os_client
+
+LOG = logging.getLogger(__name__)
+
+
+class SecuritygroupAPI(object):
+    def securitygroup_create(self, name):
+        try:
+            neutron = os_client.get_neutron_client()
+            res = neutron.create_security_group({"security_group":
+                                                 {"name": name}})
+            neutron_securitygroup_id = res['security_group']['id']
+        except Exception as e:
+            LOG.exception(e)
+            raise exception.SecuritygroupCreateFailed()
+
+        return neutron_securitygroup_id
+
+    def securitygroup_delete(self, neutron_securitygroup_id):
+        try:
+            neutron = os_client.get_neutron_client()
+            neutron.delete_security_group(neutron_securitygroup_id)
+        except Exception as e:
+            LOG.exception(e)
+            raise exception.SecuritygroupDeleteFailed()
+
+
+class SecuritygroupruleAPI(object):
+    def securitygrouprule_create(self, neutron_securitygroup_id, protocol,
+                                 port_range_min=None, port_range_max=None,
+                                 remote_neutron_securitygroup_id=None,
+                                 remote_ip_prefix=None,
+                                 direction="ingress", ethertype="IPv4"):
+        try:
+            self.neutron = os_client.get_neutron_client()
+            if remote_neutron_securitygroup_id:
+                self.neutron.create_security_group_rule({"security_group_rule":
+                    {"direction": direction,
+                     "ethertype": ethertype,
+                     "security_group_id": neutron_securitygroup_id,
+                     "protocol": protocol,
+                     "port_range_min": port_range_min or port_range_max,
+                     "port_range_max": port_range_max,
+                     "remote_group_id": remote_neutron_securitygroup_id,
+                     }})
+            elif remote_ip_prefix:
+                self.neutron.create_security_group_rule({"security_group_rule":
+                    {"direction": direction,
+                     "ethertype": ethertype,
+                     "security_group_id": neutron_securitygroup_id,
+                     "protocol": protocol,
+                     "port_range_min": port_range_min or port_range_max,
+                     "port_range_max": port_range_max,
+                     "remote_ip_prefix": remote_ip_prefix,
+                     }})
+        except Exception as e:
+            LOG.exception(e)
+            raise exception.SecuritygroupCreateFailed()
diff --git a/rack/resourceoperator/rpcapi.py b/rack/resourceoperator/rpcapi.py
new file mode 100644
index 0000000..555260f
--- /dev/null
+++ b/rack/resourceoperator/rpcapi.py
@@ -0,0 +1,107 @@
+# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Client side of the resource operator manager RPC API.
+"""
+
+from oslo.config import cfg
+from oslo import messaging
+
+from rack import object as rack_object
+from rack import rpc
+
+rpcapi_opts = [
+    cfg.StrOpt('resourceoperator_topic',
+               default='resourceoperator',
+               help='The topic resourceoperator nodes listen on'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(rpcapi_opts)
+
+rpcapi_cap_opt = cfg.StrOpt('resourceoperator',
+        help='Set a version cap for messages sent to resourceoperator services')
+CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')
+
+
+class ResourceOperatorAPI(object):
+
+    '''Client side of the resource_operator rpc API.
+
+    API version history:
+
+        1.0 - Initial version.
+ ''' + + VERSION_ALIASES = { + 'juno': '1.0', + } + + def __init__(self): + super(ResourceOperatorAPI, self).__init__() + target = messaging.Target(topic=CONF.resourceoperator_topic, version='1.0') + version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.resourceoperator, + CONF.upgrade_levels.resourceoperator) + serializer = rack_object.RackObjectSerializer() + self.client = rpc.get_client(target, version_cap=version_cap, + serializer=serializer) + + def keypair_create(self, ctxt, host, gid, keypair_id, name): + cctxt = self.client.prepare(server=host) + cctxt.cast(ctxt, "keypair_create", gid=gid, + keypair_id=keypair_id, name=name) + + def keypair_delete(self, ctxt, host, nova_keypair_id): + cctxt = self.client.prepare(server=host) + cctxt.cast(ctxt, "keypair_delete", + nova_keypair_id=nova_keypair_id) + + def network_create(self, ctxt, host, network): + cctxt = self.client.prepare(server=host) + cctxt.cast(ctxt, 'network_create', network=network) + + def network_delete(self, ctxt, host, neutron_network_id, ext_router): + cctxt = self.client.prepare(server=host) + cctxt.cast(ctxt, 'network_delete', + neutron_network_id=neutron_network_id, + ext_router=ext_router) + + def securitygroup_create(self, ctxt, host, gid, securitygroup_id, name, securitygrouprules): + cctxt = self.client.prepare(server=host) + cctxt.cast(ctxt, "securitygroup_create", + gid=gid, + securitygroup_id=securitygroup_id, + name=name, + securitygrouprules=securitygrouprules) + + def securitygroup_delete(self, ctxt, host, neutron_securitygroup_id): + cctxt = self.client.prepare(server=host) + cctxt.cast(ctxt, "securitygroup_delete", + neutron_securitygroup_id=neutron_securitygroup_id) + + def process_create(self, ctxt, host, pid, ppid, gid, name, + glance_image_id, nova_flavor_id, + nova_keypair_id, neutron_securitygroup_ids, + neutron_network_ids, metadata): + cctxt = self.client.prepare(server=host) + cctxt.cast(ctxt, "process_create", + pid=pid, ppid=ppid, gid=gid, name=name, + glance_image_id=glance_image_id, nova_flavor_id=nova_flavor_id, + nova_keypair_id=nova_keypair_id, neutron_securitygroup_ids=neutron_securitygroup_ids, + neutron_network_ids=neutron_network_ids, metadata=metadata) + + def process_delete(self, ctxt, host, nova_instance_id): + cctxt = self.client.prepare(server=host) + cctxt.cast(ctxt, "process_delete", nova_instance_id=nova_instance_id) diff --git a/rack/rpc.py b/rack/rpc.py new file mode 100644 index 0000000..7a04d91 --- /dev/null +++ b/rack/rpc.py @@ -0,0 +1,144 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
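+# Usage sketch (illustrative only; mirrors how rack.service and the rpcapi
+# modules in this patch use these helpers):
+#
+#     from oslo.config import cfg
+#     from oslo import messaging
+#     from rack import rpc
+#
+#     rpc.init(cfg.CONF)
+#     client = rpc.get_client(messaging.Target(topic='scheduler',
+#                                              version='1.0'))
+#     notifier = rpc.get_notifier(service='scheduler', host=cfg.CONF.host)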
+ +__all__ = [ + 'init', + 'cleanup', + 'set_defaults', + 'add_extra_exmods', + 'clear_extra_exmods', + 'get_allowed_exmods', + 'RequestContextSerializer', + 'get_client', + 'get_server', + 'get_notifier', + 'TRANSPORT_ALIASES', +] + +from oslo.config import cfg +from oslo import messaging + +import rack.context +import rack.exception +from rack.openstack.common import jsonutils + +CONF = cfg.CONF +TRANSPORT = None +NOTIFIER = None + +ALLOWED_EXMODS = [ + rack.exception.__name__, +] +EXTRA_EXMODS = [] + +TRANSPORT_ALIASES = { + 'rack.openstack.common.rpc.impl_kombu': 'rabbit', + 'rack.openstack.common.rpc.impl_qpid': 'qpid', + 'rack.openstack.common.rpc.impl_zmq': 'zmq', + 'rack.rpc.impl_kombu': 'rabbit', + 'rack.rpc.impl_qpid': 'qpid', + 'rack.rpc.impl_zmq': 'zmq', +} + + +def init(conf): + global TRANSPORT, NOTIFIER + exmods = get_allowed_exmods() + TRANSPORT = messaging.get_transport(conf, + allowed_remote_exmods=exmods, + aliases=TRANSPORT_ALIASES) + serializer = RequestContextSerializer(JsonPayloadSerializer()) + NOTIFIER = messaging.Notifier(TRANSPORT, serializer=serializer) + + +def cleanup(): + global TRANSPORT, NOTIFIER + assert TRANSPORT is not None + assert NOTIFIER is not None + TRANSPORT.cleanup() + TRANSPORT = NOTIFIER = None + + +def set_defaults(control_exchange): + messaging.set_transport_defaults(control_exchange) + + +def add_extra_exmods(*args): + EXTRA_EXMODS.extend(args) + + +def clear_extra_exmods(): + del EXTRA_EXMODS[:] + + +def get_allowed_exmods(): + return ALLOWED_EXMODS + EXTRA_EXMODS + + +class JsonPayloadSerializer(messaging.NoOpSerializer): + @staticmethod + def serialize_entity(context, entity): + return jsonutils.to_primitive(entity, convert_instances=True) + + +class RequestContextSerializer(messaging.Serializer): + + def __init__(self, base): + self._base = base + + def serialize_entity(self, context, entity): + if not self._base: + return entity + return self._base.serialize_entity(context, entity) + + def deserialize_entity(self, context, entity): + if not self._base: + return entity + return self._base.deserialize_entity(context, entity) + + def serialize_context(self, context): + return context.to_dict() + + def deserialize_context(self, context): + return rack.context.RequestContext.from_dict(context) + + +def get_transport_url(url_str=None): + return messaging.TransportURL.parse(CONF, url_str, TRANSPORT_ALIASES) + + +def get_client(target, version_cap=None, serializer=None): + assert TRANSPORT is not None + serializer = RequestContextSerializer(serializer) + return messaging.RPCClient(TRANSPORT, + target, + version_cap=version_cap, + serializer=serializer) + + +def get_server(target, endpoints, serializer=None): + assert TRANSPORT is not None + serializer = RequestContextSerializer(serializer) + return messaging.get_rpc_server(TRANSPORT, + target, + endpoints, + executor='eventlet', + serializer=serializer) + + +def get_notifier(service=None, host=None, publisher_id=None): + assert NOTIFIER is not None + if not publisher_id: + publisher_id = "%s.%s" % (service, host or CONF.host) + return NOTIFIER.prepare(publisher_id=publisher_id) diff --git a/rack/safe_utils.py b/rack/safe_utils.py new file mode 100644 index 0000000..42320d6 --- /dev/null +++ b/rack/safe_utils.py @@ -0,0 +1,50 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utilities and helper functions that won't produce circular imports.""" + +import inspect + + +def getcallargs(function, *args, **kwargs): + """This is a simplified inspect.getcallargs (2.7+). + + It should be replaced when python >= 2.7 is standard. + """ + keyed_args = {} + argnames, varargs, keywords, defaults = inspect.getargspec(function) + + keyed_args.update(kwargs) + + #NOTE(alaski) the implicit 'self' or 'cls' argument shows up in + # argnames but not in args or kwargs. Uses 'in' rather than '==' because + # some tests use 'self2'. + if 'self' in argnames[0] or 'cls' == argnames[0]: + # The function may not actually be a method or have im_self. + # Typically seen when it's stubbed with mox. + if inspect.ismethod(function) and hasattr(function, 'im_self'): + keyed_args[argnames[0]] = function.im_self + else: + keyed_args[argnames[0]] = None + + remaining_argnames = filter(lambda x: x not in keyed_args, argnames) + keyed_args.update(dict(zip(remaining_argnames, args))) + + if defaults: + num_defaults = len(defaults) + for argname, value in zip(argnames[-num_defaults:], defaults): + if argname not in keyed_args: + keyed_args[argname] = value + + return keyed_args diff --git a/rack/scheduler/__init__.py b/rack/scheduler/__init__.py new file mode 100644 index 0000000..f7bae91 --- /dev/null +++ b/rack/scheduler/__init__.py @@ -0,0 +1,25 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +:mod:`rack.scheduler` -- Scheduler Nodes +===================================================== + +.. automodule:: rack.scheduler + :platform: Unix + :synopsis: Module that picks a compute node to run a VM instance. +.. moduleauthor:: Sandy Walsh +.. moduleauthor:: Ed Leafe +.. moduleauthor:: Chris Behrens +""" diff --git a/rack/scheduler/chance.py b/rack/scheduler/chance.py new file mode 100644 index 0000000..925bf5b --- /dev/null +++ b/rack/scheduler/chance.py @@ -0,0 +1,65 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
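+# Usage sketch (illustrative only; the returned host name is an assumption):
+#
+#     scheduler = ChanceScheduler()
+#     dest = scheduler.select_destinations(ctxt, request_spec={},
+#                                          filter_properties={})
+#     # dest == {'host': 'operator-node-1', 'nodename': None, 'limits': None}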
+""" +Chance (Random) Scheduler implementation +""" + +import random + +from oslo.config import cfg + +from rack import exception +from rack.openstack.common.gettextutils import _ +from rack.scheduler import driver + +CONF = cfg.CONF +CONF.import_opt('resourceoperator_topic', 'rack.resourceoperator.rpcapi') + + +class ChanceScheduler(driver.Scheduler): + """Implements Scheduler as a random node selector.""" + + def __init__(self, *args, **kwargs): + super(ChanceScheduler, self).__init__(*args, **kwargs) + + def _filter_hosts(self, request_spec, hosts, filter_properties): + """Filter a list of hosts based on request_spec.""" + + ignore_hosts = filter_properties.get('ignore_hosts', []) + hosts = [host for host in hosts if host not in ignore_hosts] + return hosts + + def _schedule(self, context, topic, request_spec, filter_properties): + """Picks a host that is up at random.""" + + elevated = context.elevated() + hosts = self.hosts_up(elevated, topic) + if not hosts: + msg = _("Is the appropriate service running?") + raise exception.NoValidHost(reason=msg) + + hosts = self._filter_hosts(request_spec, hosts, filter_properties) + if not hosts: + msg = _("Could not find another resourceoperator") + raise exception.NoValidHost(reason=msg) + + return random.choice(hosts) + + def select_destinations(self, context, request_spec, filter_properties): + """Selects random destinations.""" + host = self._schedule(context, CONF.resourceoperator_topic, + request_spec, filter_properties) + host_state = dict(host=host, nodename=None, limits=None) + + return host_state diff --git a/rack/scheduler/driver.py b/rack/scheduler/driver.py new file mode 100644 index 0000000..8403f1c --- /dev/null +++ b/rack/scheduler/driver.py @@ -0,0 +1,69 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Scheduler base class that all Schedulers should inherit from +""" + +from oslo.config import cfg + +from rack import db +from rack.openstack.common.gettextutils import _ +from rack.openstack.common import log as logging +from rack import servicegroup + +LOG = logging.getLogger(__name__) + +scheduler_driver_opts = [ + # TODO: If we use intelligent scheduler driver like filter_scheduler, use this. + #cfg.StrOpt('scheduler_host_manager', + # default='rack.scheduler.host_manager.HostManager', + # help='The scheduler host manager class to use'), + cfg.IntOpt('scheduler_max_attempts', + default=3, + help='Maximum number of attempts to schedule an instance'), + ] + +CONF = cfg.CONF +CONF.register_opts(scheduler_driver_opts) + + +class Scheduler(object): + """The base class that all Scheduler classes should inherit from.""" + + def __init__(self): + # TODO: If we use intelligent scheduler driver like filter_scheduler, use this. 
+ #self.host_manager = importutils.import_object( + # CONF.scheduler_host_manager) + self.servicegroup_api = servicegroup.API() + + def run_periodic_tasks(self, context): + """Manager calls this so drivers can perform periodic tasks.""" + pass + + def hosts_up(self, context, topic): + """Return the list of hosts that have a running service for topic.""" + + services = db.service_get_all_by_topic(context, topic) + return [service['host'] + for service in services + if self.servicegroup_api.service_is_up(service)] + + def select_destinations(self, context, request_spec, filter_properties): + """Must override select_destinations method. + + :return: A list of dicts with 'host', 'nodename' and 'limits' as keys + that satisfies the request_spec and filter_properties. + """ + msg = _("Driver must implement select_destinations") + raise NotImplementedError(msg) diff --git a/rack/scheduler/manager.py b/rack/scheduler/manager.py new file mode 100644 index 0000000..b890980 --- /dev/null +++ b/rack/scheduler/manager.py @@ -0,0 +1,69 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Scheduler Service +""" + +from oslo.config import cfg +from oslo import messaging + +from rack.resourceoperator import rpcapi +from rack import exception +from rack import manager +from rack.openstack.common import importutils +from rack.openstack.common import jsonutils +from rack.openstack.common import log as logging + + +LOG = logging.getLogger(__name__) + +scheduler_driver_opts = [ + cfg.StrOpt('scheduler_driver', + default='rack.scheduler.chance.ChanceScheduler', + help='Default driver to use for the scheduler'), + cfg.IntOpt('scheduler_driver_task_period', + default=60, + help='How often (in seconds) to run periodic tasks in ' + 'the scheduler driver of your choice. ' + 'Please note this is likely to interact with the value ' + 'of service_down_time, but exactly how they interact ' + 'will depend on your choice of scheduler driver.'), +] +CONF = cfg.CONF +CONF.register_opts(scheduler_driver_opts) + + +class SchedulerManager(manager.Manager): + + target = messaging.Target(version='1.0') + + def __init__(self, scheduler_driver=None, *args, **kwargs): + if not scheduler_driver: + scheduler_driver = CONF.scheduler_driver + self.driver = importutils.import_object(scheduler_driver) + self.resourceoperator_rpcapi = rpcapi.ResourceOperatorAPI() + super(SchedulerManager, self).__init__(service_name='scheduler', + *args, **kwargs) + + @messaging.expected_exceptions(exception.NoValidHost) + def select_destinations(self, context, request_spec, filter_properties): + """Returns destinations(s) best suited for this request_spec and + filter_properties. + + The result should be a list of dicts with 'host', 'nodename' and + 'limits' as keys. 
+ """ + dests = self.driver.select_destinations(context, request_spec, + filter_properties) + return jsonutils.to_primitive(dests) diff --git a/rack/scheduler/rpcapi.py b/rack/scheduler/rpcapi.py new file mode 100644 index 0000000..c0428c0 --- /dev/null +++ b/rack/scheduler/rpcapi.py @@ -0,0 +1,63 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Client side of the scheduler manager RPC API. +""" + +from oslo.config import cfg +from oslo import messaging + +from rack import object as rack_object +from rack import rpc + +rpcapi_opts = [ + cfg.StrOpt('scheduler_topic', + default='scheduler', + help='The topic scheduler nodes listen on'), +] + +CONF = cfg.CONF +CONF.register_opts(rpcapi_opts) + +rpcapi_cap_opt = cfg.StrOpt('scheduler', + help='Set a version cap for messages sent to scheduler services') +CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels') + + +class SchedulerAPI(object): + '''Client side of the scheduler rpc API. + + API version history: + + 1.0 - Initial version. + ''' + + VERSION_ALIASES = { + 'juno': '1.0', + } + + def __init__(self): + super(SchedulerAPI, self).__init__() + target = messaging.Target(topic=CONF.scheduler_topic, version='1.0') + version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.scheduler, + CONF.upgrade_levels.scheduler) + serializer = rack_object.RackObjectSerializer() + self.client = rpc.get_client(target, version_cap=version_cap, + serializer=serializer) + + def select_destinations(self, ctxt, request_spec, filter_properties): + cctxt = self.client.prepare() + return cctxt.call(ctxt, 'select_destinations', + request_spec=request_spec, filter_properties=filter_properties) diff --git a/rack/scheduler/utils.py b/rack/scheduler/utils.py new file mode 100644 index 0000000..9bbba4b --- /dev/null +++ b/rack/scheduler/utils.py @@ -0,0 +1,169 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Utility methods for scheduling.""" + +import sys + +from rack.compute import flavors +from rack.compute import utils as compute_utils +from rack import db +from rack import notifications +from rack.objects import base as obj_base +from rack.objects import instance as instance_obj +from rack.openstack.common.gettextutils import _ +from rack.openstack.common import jsonutils +from rack.openstack.common import log as logging +from rack import rpc + +LOG = logging.getLogger(__name__) + + +def build_request_spec(ctxt, image, instances, instance_type=None): + """Build a request_spec for the scheduler. + + The request_spec assumes that all instances to be scheduled are the same + type. + """ + instance = instances[0] + if isinstance(instance, instance_obj.Instance): + instance = obj_base.obj_to_primitive(instance) + + if instance_type is None: + instance_type = flavors.extract_flavor(instance) + # NOTE(comstud): This is a bit ugly, but will get cleaned up when + # we're passing an InstanceType internal object. + extra_specs = db.flavor_extra_specs_get(ctxt, instance_type['flavorid']) + instance_type['extra_specs'] = extra_specs + request_spec = { + 'image': image or {}, + 'instance_properties': instance, + 'instance_type': instance_type, + 'num_instances': len(instances), + # NOTE(alaski): This should be removed as logic moves from the + # scheduler to conductor. Provides backwards compatibility now. + 'instance_uuids': [inst['uuid'] for inst in instances]} + return jsonutils.to_primitive(request_spec) + + +def set_vm_state_and_notify(context, service, method, updates, ex, + request_spec, db): + """changes VM state and notifies.""" + LOG.warning(_("Failed to %(service)s_%(method)s: %(ex)s"), + {'service': service, 'method': method, 'ex': ex}) + + vm_state = updates['vm_state'] + properties = request_spec.get('instance_properties', {}) + # NOTE(vish): We shouldn't get here unless we have a catastrophic + # failure, so just set all instances to error. if uuid + # is not set, instance_uuids will be set to [None], this + # is solely to preserve existing behavior and can + # be removed along with the 'if instance_uuid:' if we can + # verify that uuid is always set. + uuids = [properties.get('uuid')] + from rack.conductor import api as conductor_api + conductor = conductor_api.LocalAPI() + notifier = rpc.get_notifier(service) + for instance_uuid in request_spec.get('instance_uuids') or uuids: + if instance_uuid: + state = vm_state.upper() + LOG.warning(_('Setting instance to %s state.'), state, + instance_uuid=instance_uuid) + + # update instance state and notify on the transition + (old_ref, new_ref) = db.instance_update_and_get_original( + context, instance_uuid, updates) + notifications.send_update(context, old_ref, new_ref, + service=service) + compute_utils.add_instance_fault_from_exc(context, + conductor, + new_ref, ex, sys.exc_info()) + + payload = dict(request_spec=request_spec, + instance_properties=properties, + instance_id=instance_uuid, + state=vm_state, + method=method, + reason=ex) + + event_type = '%s.%s' % (service, method) + notifier.error(context, event_type, payload) + + +def populate_filter_properties(filter_properties, host_state): + """Add additional information to the filter properties after a node has + been selected by the scheduling process. 
+ """ + if isinstance(host_state, dict): + host = host_state['host'] + nodename = host_state['nodename'] + limits = host_state['limits'] + else: + host = host_state.host + nodename = host_state.nodename + limits = host_state.limits + + # Adds a retry entry for the selected compute host and node: + _add_retry_host(filter_properties, host, nodename) + + # Adds oversubscription policy + if not filter_properties.get('force_hosts'): + filter_properties['limits'] = limits + + +def _add_retry_host(filter_properties, host, node): + """Add a retry entry for the selected compute node. In the event that + the request gets re-scheduled, this entry will signal that the given + node has already been tried. + """ + retry = filter_properties.get('retry', None) + force_hosts = filter_properties.get('force_hosts', []) + force_nodes = filter_properties.get('force_nodes', []) + if not retry or force_hosts or force_nodes: + return + hosts = retry['hosts'] + hosts.append([host, node]) + + +def parse_options(opts, sep='=', converter=str, name=""): + """Parse a list of options, each in the format of . Also + use the converter to convert the value into desired type. + + :params opts: list of options, e.g. from oslo.config.cfg.ListOpt + :params sep: the separator + :params converter: callable object to convert the value, should raise + ValueError for conversion failure + :params name: name of the option + + :returns: a lists of tuple of values (key, converted_value) + """ + good = [] + bad = [] + for opt in opts: + try: + key, seen_sep, value = opt.partition(sep) + value = converter(value) + except ValueError: + key = None + value = None + if key and seen_sep and value is not None: + good.append((key, value)) + else: + bad.append(opt) + if bad: + LOG.warn(_("Ignoring the invalid elements of the option " + "%(name)s: %(options)s"), + {'name': name, + 'options': ", ".join(bad)}) + return good diff --git a/rack/service.py b/rack/service.py new file mode 100644 index 0000000..8d7cc55 --- /dev/null +++ b/rack/service.py @@ -0,0 +1,373 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Generic Node base class for all workers that run on hosts.""" + +import os +import random +import sys + +from oslo.config import cfg +from oslo import messaging + +from rack import baserpc +from rack import context +from rack import db +from rack import debugger +from rack import exception +from rack import object as rack_object +from rack.openstack.common.gettextutils import _ +from rack.openstack.common import importutils +from rack.openstack.common import log as logging +from rack.openstack.common import service +from rack import rpc +from rack import servicegroup +from rack import utils +from rack import version +from rack import wsgi + +LOG = logging.getLogger(__name__) + +service_opts = [ + cfg.IntOpt('report_interval', + default=10, + help='Seconds between nodes reporting state to datastore'), + cfg.BoolOpt('periodic_enable', + default=True, + help='Enable periodic tasks'), + cfg.IntOpt('periodic_fuzzy_delay', + default=60, + help='Range of seconds to randomly delay when starting the' + ' periodic task scheduler to reduce stampeding.' + ' (Disable by setting to 0)'), + cfg.ListOpt('enabled_apis', + default=['rackapi'], + help='A list of APIs to enable by default'), + cfg.ListOpt('enabled_ssl_apis', + default=[], + help='A list of APIs with enabled SSL'), + cfg.StrOpt('rackapi_listen', + default="0.0.0.0", + help='The IP address on which the OpenStack API will listen.'), + cfg.IntOpt('rackapi_listen_port', + default=8088, + help='The port on which the OpenStack API will listen.'), + cfg.IntOpt('rackapi_workers', + help='Number of workers for OpenStack API service. The default ' + 'will be the number of CPUs available.'), + cfg.StrOpt('scheduler_manager', + default='rack.scheduler.manager.SchedulerManager', + help='Full class name for the Manager for scheduler'), + cfg.StrOpt('resourceoperator_manager', + default='rack.resourceoperator.manager.ResourceOperatorManager', + help='Full class name for the Manager for resource_operator'), + cfg.IntOpt('service_down_time', + default=60, + help='Maximum time since last check-in for up service'), + ] + +CONF = cfg.CONF +CONF.register_opts(service_opts) +CONF.import_opt('host', 'rack.netconf') + + +class Service(service.Service): + """Service object for binaries running on hosts. + + A service takes a manager and enables rpc by listening to queues based + on topic. It also periodically runs tasks on the manager and reports + it state to the database services table. 
+ """ + + def __init__(self, host, binary, topic, manager, report_interval=None, + periodic_enable=None, periodic_fuzzy_delay=None, + periodic_interval_max=None, db_allowed=True, + *args, **kwargs): + super(Service, self).__init__() + self.host = host + self.binary = binary + self.topic = topic + self.manager_class_name = manager + self.servicegroup_api = servicegroup.API(db_allowed=db_allowed) + manager_class = importutils.import_class(self.manager_class_name) + self.manager = manager_class(host=self.host, *args, **kwargs) + self.rpcserver = None + self.report_interval = report_interval + self.periodic_enable = periodic_enable + self.periodic_fuzzy_delay = periodic_fuzzy_delay + self.periodic_interval_max = periodic_interval_max + self.saved_args, self.saved_kwargs = args, kwargs + self.backdoor_port = None + + def start(self): + verstr = version.version_string_with_package() + LOG.audit(_('Starting %(topic)s node (version %(version)s)'), + {'topic': self.topic, 'version': verstr}) + self.basic_config_check() + self.manager.init_host() + self.model_disconnected = False + ctxt = context.get_admin_context() + try: + self.service_ref = db.service_get_by_args(ctxt, + self.host, self.binary) + self.service_id = self.service_ref['id'] + except exception.NotFound: + try: + self.service_ref = self._create_service_ref(ctxt) + except exception.ServiceTopicExists: + self.service_ref = db.service_get_by_args(ctxt, + self.host, self.binary) + + self.manager.pre_start_hook() + + LOG.debug(_("Join ServiceGroup membership for this service %s") + % self.topic) + self.servicegroup_api.join(self.host, self.topic, self) + + if self.backdoor_port is not None: + self.manager.backdoor_port = self.backdoor_port + + LOG.debug(_("Creating RPC server for service %s") % self.topic) + + target = messaging.Target(topic=self.topic, server=self.host) + + endpoints = [ + self.manager, + baserpc.BaseRPCAPI(self.manager.service_name, self.backdoor_port) + ] + endpoints.extend(self.manager.additional_endpoints) + + serializer = rack_object.RackObjectSerializer() + + self.rpcserver = rpc.get_server(target, endpoints, serializer) + self.rpcserver.start() + + self.manager.post_start_hook() + + if self.periodic_enable: + if self.periodic_fuzzy_delay: + initial_delay = random.randint(0, self.periodic_fuzzy_delay) + else: + initial_delay = None + + self.tg.add_dynamic_timer(self.periodic_tasks, + initial_delay=initial_delay, + periodic_interval_max= + self.periodic_interval_max) + + def _create_service_ref(self, context): + svc_values = { + 'host': self.host, + 'binary': self.binary, + 'topic': self.topic, + 'report_count': 0 + } + service = db.service_create(context, svc_values) + self.service_id = service['id'] + return service + + def __getattr__(self, key): + manager = self.__dict__.get('manager', None) + return getattr(manager, key) + + @classmethod + def create(cls, host=None, binary=None, topic=None, manager=None, + report_interval=None, periodic_enable=None, + periodic_fuzzy_delay=None, periodic_interval_max=None, + db_allowed=True): + """Instantiates class and passes back application object. 
+ + :param host: defaults to CONF.host + :param binary: defaults to basename of executable + :param topic: defaults to bin_name - 'rack-' part + :param manager: defaults to CONF._manager + :param report_interval: defaults to CONF.report_interval + :param periodic_enable: defaults to CONF.periodic_enable + :param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay + :param periodic_interval_max: if set, the max time to wait between runs + + """ + if not host: + host = CONF.host + if not binary: + binary = os.path.basename(sys.argv[0]) + if not topic: + topic = binary.rpartition('rack-')[2] + if not manager: + manager_cls = ('%s_manager' % + binary.rpartition('rack-')[2]) + manager = CONF.get(manager_cls, None) + if report_interval is None: + report_interval = CONF.report_interval + if periodic_enable is None: + periodic_enable = CONF.periodic_enable + if periodic_fuzzy_delay is None: + periodic_fuzzy_delay = CONF.periodic_fuzzy_delay + + debugger.init() + + service_obj = cls(host, binary, topic, manager, + report_interval=report_interval, + periodic_enable=periodic_enable, + periodic_fuzzy_delay=periodic_fuzzy_delay, + periodic_interval_max=periodic_interval_max, + db_allowed=db_allowed) + + return service_obj + + def kill(self): + """Destroy the service object in the datastore.""" + self.stop() + + def stop(self): + try: + self.rpcserver.stop() + self.rpcserver.wait() + except Exception: + pass + + try: + self.manager.cleanup_host() + except Exception: + LOG.exception(_('Service error occurred during cleanup_host')) + pass + + super(Service, self).stop() + + def periodic_tasks(self, raise_on_error=False): + """Tasks to be run at a periodic interval.""" + ctxt = context.get_admin_context() + return self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error) + + def basic_config_check(self): + """Perform basic config checks before starting processing.""" + # Make sure the tempdir exists and is writable + try: + with utils.tempdir(): + pass + except Exception as e: + LOG.error(_('Temporary directory is invalid: %s'), e) + sys.exit(1) + + +class WSGIService(object): + """Provides ability to launch API from a 'paste' configuration.""" + + def __init__(self, name, loader=None, use_ssl=False, max_url_len=None): + """Initialize, but do not start the WSGI server. + + :param name: The name of the WSGI server given to the loader. + :param loader: Loads the WSGI application using the given name. + :returns: None + + """ + self.name = name + self.manager = self._get_manager() + self.loader = loader or wsgi.Loader() + self.app = self.loader.load_app(name) + self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0") + self.port = getattr(CONF, '%s_listen_port' % name, 0) + self.workers = (getattr(CONF, '%s_workers' % name, None) or + utils.cpu_count()) + if self.workers and self.workers < 1: + worker_name = '%s_workers' % name + msg = (_("%(worker_name)s value of %(workers)s is invalid, " + "must be greater than 0") % + {'worker_name': worker_name, + 'workers': str(self.workers)}) + raise exception.InvalidInput(msg) + self.use_ssl = use_ssl + self.server = wsgi.Server(name, + self.app, + host=self.host, + port=self.port, + use_ssl=self.use_ssl, + max_url_len=max_url_len) + # Pull back actual port used + self.port = self.server.port + self.backdoor_port = None + + def _get_manager(self): + """Initialize a Manager object appropriate for this service. + + Use the service name to look up a Manager subclass from the + configuration and initialize an instance. 
If no class name + is configured, just return None. + + :returns: a Manager instance, or None. + + """ + fl = '%s_manager' % self.name + if fl not in CONF: + return None + + manager_class_name = CONF.get(fl, None) + if not manager_class_name: + return None + + manager_class = importutils.import_class(manager_class_name) + return manager_class() + + def start(self): + """Start serving this service using loaded configuration. + + Also, retrieve updated port number in case '0' was passed in, which + indicates a random port should be used. + + :returns: None + + """ + if self.manager: + self.manager.init_host() + self.manager.pre_start_hook() + if self.backdoor_port is not None: + self.manager.backdoor_port = self.backdoor_port + self.server.start() + if self.manager: + self.manager.post_start_hook() + + def stop(self): + """Stop serving this API. + + :returns: None + + """ + self.server.stop() + + def wait(self): + """Wait for the service to stop serving this API. + + :returns: None + + """ + self.server.wait() + + +def process_launcher(): + return service.ProcessLauncher() + + +_launcher = None + + +def serve(server, workers=None): + global _launcher + if _launcher: + raise RuntimeError(_('serve() can only be called once')) + + _launcher = service.launch(server, workers=workers) + + +def wait(): + _launcher.wait() diff --git a/rack/servicegroup/__init__.py b/rack/servicegroup/__init__.py new file mode 100644 index 0000000..f46ffaa --- /dev/null +++ b/rack/servicegroup/__init__.py @@ -0,0 +1,22 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +The membership service for Rack. Different implementations can be plugged +according to the Rack configuration. +""" + +from rack.servicegroup import api + +API = api.API diff --git a/rack/servicegroup/api.py b/rack/servicegroup/api.py new file mode 100644 index 0000000..d472e32 --- /dev/null +++ b/rack/servicegroup/api.py @@ -0,0 +1,167 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
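+# Usage sketch (illustrative only; this mirrors how rack.service.Service
+# joins and checks membership in this patch):
+#
+#     from rack import servicegroup
+#
+#     sg_api = servicegroup.API()
+#     sg_api.join(CONF.host, self.topic, self)       # from Service.start()
+#     sg_api.service_is_up(service_ref)              # from the scheduler driver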
+"""Define APIs for the servicegroup access.""" + +import random + +from oslo.config import cfg + +from rack.openstack.common.gettextutils import _ +from rack.openstack.common import importutils +from rack.openstack.common import log as logging +from rack import utils + +LOG = logging.getLogger(__name__) +_default_driver = 'db' +servicegroup_driver_opt = cfg.StrOpt('servicegroup_driver', + default=_default_driver, + help='The driver for servicegroup ' + 'service (valid options are: ' + 'db, zk, mc)') + +CONF = cfg.CONF +CONF.register_opt(servicegroup_driver_opt) + +INITIAL_REPORTING_DELAY = 5 + + +class API(object): + + _driver = None + _driver_name_class_mapping = { + 'db': 'rack.servicegroup.drivers.db.DbDriver', + 'zk': 'rack.servicegroup.drivers.zk.ZooKeeperDriver', + 'mc': 'rack.servicegroup.drivers.mc.MemcachedDriver' + } + + def __new__(cls, *args, **kwargs): + '''Create an instance of the servicegroup API. + + args and kwargs are passed down to the servicegroup driver when it gets + created. No args currently exist, though. Valid kwargs are: + + db_allowed - Boolean. False if direct db access is not allowed and + alternative data access (conductor) should be used + instead. + ''' + + if not cls._driver: + LOG.debug(_('ServiceGroup driver defined as an instance of %s'), + str(CONF.servicegroup_driver)) + driver_name = CONF.servicegroup_driver + try: + driver_class = cls._driver_name_class_mapping[driver_name] + except KeyError: + raise TypeError(_("unknown ServiceGroup driver name: %s") + % driver_name) + cls._driver = importutils.import_object(driver_class, + *args, **kwargs) + utils.check_isinstance(cls._driver, ServiceGroupDriver) + # we don't have to check that cls._driver is not NONE, + # check_isinstance does it + return super(API, cls).__new__(cls) + + def __init__(self, *args, **kwargs): + self.basic_config_check() + + def basic_config_check(self): + """Perform basic config check.""" + # Make sure report interval is less than service down time + report_interval = CONF.report_interval + if CONF.service_down_time <= report_interval: + new_service_down_time = int(report_interval * 2.5) + LOG.warn(_("Report interval must be less than service down " + "time. Current config: . Setting service_down_time to: " + "%(new_service_down_time)s"), + {'service_down_time': CONF.service_down_time, + 'report_interval': report_interval, + 'new_service_down_time': new_service_down_time}) + CONF.set_override('service_down_time', new_service_down_time) + + def join(self, member_id, group_id, service=None): + """Add a new member to the ServiceGroup + + @param member_id: the joined member ID + @param group_id: the group name, of the joined member + @param service: the parameter can be used for notifications about + disconnect mode and update some internals + """ + msg = _('Join new ServiceGroup member %(member_id)s to the ' + '%(group_id)s group, service = %(service)s') + LOG.debug(msg, {'member_id': member_id, 'group_id': group_id, + 'service': service}) + return self._driver.join(member_id, group_id, service) + + def service_is_up(self, member): + """Check if the given member is up.""" + # NOTE(johngarbutt) no logging in this method, + # so this doesn't slow down the scheduler + return self._driver.is_up(member) + + def leave(self, member_id, group_id): + """Explicitly remove the given member from the ServiceGroup + monitoring. 
+ """ + msg = _('Explicitly remove the given member %(member_id)s from the' + '%(group_id)s group monitoring') + LOG.debug(msg, {'member_id': member_id, 'group_id': group_id}) + return self._driver.leave(member_id, group_id) + + def get_all(self, group_id): + """Returns ALL members of the given group.""" + LOG.debug(_('Returns ALL members of the [%s] ' + 'ServiceGroup'), group_id) + return self._driver.get_all(group_id) + + def get_one(self, group_id): + """Returns one member of the given group. The strategy to select + the member is decided by the driver (e.g. random or round-robin). + """ + LOG.debug(_('Returns one member of the [%s] group'), group_id) + return self._driver.get_one(group_id) + + +class ServiceGroupDriver(object): + """Base class for ServiceGroup drivers.""" + + def join(self, member_id, group_id, service=None): + """Join the given service with it's group.""" + raise NotImplementedError() + + def is_up(self, member): + """Check whether the given member is up.""" + raise NotImplementedError() + + def leave(self, member_id, group_id): + """Remove the given member from the ServiceGroup monitoring.""" + raise NotImplementedError() + + def get_all(self, group_id): + """Returns ALL members of the given group.""" + raise NotImplementedError() + + def get_one(self, group_id): + """The default behavior of get_one is to randomly pick one from + the result of get_all(). This is likely to be overridden in the + actual driver implementation. + """ + members = self.get_all(group_id) + if members is None: + return None + length = len(members) + if length == 0: + return None + return random.choice(members) diff --git a/rack/servicegroup/drivers/__init__.py b/rack/servicegroup/drivers/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/rack/servicegroup/drivers/db.py b/rack/servicegroup/drivers/db.py new file mode 100644 index 0000000..9efcd7c --- /dev/null +++ b/rack/servicegroup/drivers/db.py @@ -0,0 +1,100 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from oslo.config import cfg +import six + +from rack import context +from rack import db +from rack.openstack.common.gettextutils import _ +from rack.openstack.common import log as logging +from rack.openstack.common import timeutils +from rack.servicegroup import api + + +CONF = cfg.CONF +CONF.import_opt('service_down_time', 'rack.service') + +LOG = logging.getLogger(__name__) + + +class DbDriver(api.ServiceGroupDriver): + + def __init__(self, *args, **kwargs): + self.db_allowed = kwargs.get('db_allowed', True) + self.service_down_time = CONF.service_down_time + + def join(self, member_id, group_id, service=None): + """Join the given service with it's group.""" + + msg = _('DB_Driver: join new ServiceGroup member %(member_id)s to ' + 'the %(group_id)s group, service = %(service)s') + LOG.debug(msg, {'member_id': member_id, 'group_id': group_id, + 'service': service}) + if service is None: + raise RuntimeError(_('service is a mandatory argument for DB based' + ' ServiceGroup driver')) + report_interval = service.report_interval + if report_interval: + service.tg.add_timer(report_interval, self._report_state, + api.INITIAL_REPORTING_DELAY, service) + + def is_up(self, service_ref): + """Moved from rack.utils + Check whether a service is up based on last heartbeat. + """ + last_heartbeat = service_ref['updated_at'] or service_ref['created_at'] + if isinstance(last_heartbeat, six.string_types): + last_heartbeat = timeutils.parse_strtime(last_heartbeat) + else: + last_heartbeat = last_heartbeat.replace(tzinfo=None) + elapsed = timeutils.delta_seconds(last_heartbeat, timeutils.utcnow()) + is_up = abs(elapsed) <= self.service_down_time + if not is_up: + msg = _('Seems service is down. Last heartbeat was %(lhb)s. ' + 'Elapsed time is %(el)s') + LOG.debug(msg, {'lhb': str(last_heartbeat), 'el': str(elapsed)}) + return is_up + + def get_all(self, group_id): + """Returns ALL members of the given group + """ + LOG.debug(_('DB_Driver: get_all members of the %s group') % group_id) + rs = [] + ctxt = context.get_admin_context() + services = db.service_get_all_by_topic(ctxt, group_id) + for service in services: + if self.is_up(service): + rs.append(service['host']) + return rs + + def _report_state(self, service): + """Update the state of this service in the datastore.""" + ctxt = context.get_admin_context() + state_catalog = {} + try: + report_count = service.service_ref['report_count'] + 1 + state_catalog['report_count'] = report_count + + service.service_ref = db.service_update(ctxt, + service.service_ref['id'], state_catalog) + + if getattr(service, 'model_disconnected', False): + service.model_disconnected = False + LOG.error(_('Recovered model server connection!')) + + except Exception: + if not getattr(service, 'model_disconnected', False): + service.model_disconnected = True + LOG.exception(_('model server went away')) diff --git a/rack/servicegroup/drivers/mc.py b/rack/servicegroup/drivers/mc.py new file mode 100644 index 0000000..d2f8870 --- /dev/null +++ b/rack/servicegroup/drivers/mc.py @@ -0,0 +1,99 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from oslo.config import cfg + +from rack import context +from rack import db +from rack.openstack.common.gettextutils import _ +from rack.openstack.common import log as logging +from rack.openstack.common import memorycache +from rack.openstack.common import timeutils +from rack.servicegroup import api + + +CONF = cfg.CONF +CONF.import_opt('service_down_time', 'rack.service') +CONF.import_opt('memcached_servers', 'rack.openstack.common.memorycache') + + +LOG = logging.getLogger(__name__) + + +class MemcachedDriver(api.ServiceGroupDriver): + + def __init__(self, *args, **kwargs): + test = kwargs.get('test') + if not CONF.memcached_servers and not test: + raise RuntimeError(_('memcached_servers not defined')) + self.mc = memorycache.get_client() + self.db_allowed = kwargs.get('db_allowed', True) + + def join(self, member_id, group_id, service=None): + """Join the given service with its group.""" + + msg = _('Memcached_Driver: join new ServiceGroup member ' + '%(member_id)s to the %(group_id)s group, ' + 'service = %(service)s') + LOG.debug(msg, {'member_id': member_id, 'group_id': group_id, + 'service': service}) + if service is None: + raise RuntimeError(_('service is a mandatory argument for ' + 'Memcached based ServiceGroup driver')) + report_interval = service.report_interval + if report_interval: + service.tg.add_timer(report_interval, self._report_state, + api.INITIAL_REPORTING_DELAY, service) + + def is_up(self, service_ref): + """Moved from rack.utils + Check whether a service is up based on last heartbeat. + """ + key = "%(topic)s:%(host)s" % service_ref + return self.mc.get(str(key)) is not None + + def get_all(self, group_id): + """Returns ALL members of the given group + """ + LOG.debug(_('Memcached_Driver: get_all members of the %s group') % + group_id) + rs = [] + ctxt = context.get_admin_context() + services = db.service_get_all_by_topic(ctxt, group_id) + for service in services: + if self.is_up(service): + rs.append(service['host']) + return rs + + def _report_state(self, service): + """Update the state of this service in the datastore.""" + ctxt = context.get_admin_context() + try: + key = "%(topic)s:%(host)s" % service.service_ref + # memcached has data expiration time capability. + # set(..., time=CONF.service_down_time) uses it and + # reduces key-deleting code. + self.mc.set(str(key), + timeutils.utcnow(), + time=CONF.service_down_time) + + # TODO(termie): make this pattern be more elegant. + if getattr(service, 'model_disconnected', False): + service.model_disconnected = False + LOG.error(_('Recovered model server connection!')) + + # TODO(vish): this should probably only catch connection errors + except Exception: # pylint: disable=W0702 + if not getattr(service, 'model_disconnected', False): + service.model_disconnected = True + LOG.exception(_('model server went away')) diff --git a/rack/servicegroup/drivers/zk.py b/rack/servicegroup/drivers/zk.py new file mode 100644 index 0000000..4eaa392 --- /dev/null +++ b/rack/servicegroup/drivers/zk.py @@ -0,0 +1,155 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os + +import eventlet +from oslo.config import cfg + +from rack import exception +from rack.openstack.common.gettextutils import _ +from rack.openstack.common import importutils +from rack.openstack.common import log as logging +from rack.openstack.common import loopingcall +from rack.servicegroup import api + +evzookeeper = importutils.try_import('evzookeeper') +membership = importutils.try_import('evzookeeper.membership') +zookeeper = importutils.try_import('zookeeper') + +zk_driver_opts = [ + cfg.StrOpt('address', + help='The ZooKeeper addresses for servicegroup service in the ' + 'format of host1:port,host2:port,host3:port'), + cfg.IntOpt('recv_timeout', + default=4000, + help='The recv_timeout parameter for the zk session'), + cfg.StrOpt('sg_prefix', + default="/servicegroups", + help='The prefix used in ZooKeeper to store ephemeral nodes'), + cfg.IntOpt('sg_retry_interval', + default=5, + help='Number of seconds to wait until retrying to join the ' + 'session'), + ] + +CONF = cfg.CONF +CONF.register_opts(zk_driver_opts, group="zookeeper") + +LOG = logging.getLogger(__name__) + + +class ZooKeeperDriver(api.ServiceGroupDriver): + """ZooKeeper driver for the service group API.""" + + def __init__(self, *args, **kwargs): + """Create the zk session object.""" + if not all([evzookeeper, membership, zookeeper]): + raise ImportError('zookeeper module not found') + null = open(os.devnull, "w") + self._session = evzookeeper.ZKSession(CONF.zookeeper.address, + recv_timeout= + CONF.zookeeper.recv_timeout, + zklog_fd=null) + self._memberships = {} + self._monitors = {} + # Make sure the prefix exists + try: + self._session.create(CONF.zookeeper.sg_prefix, "", + acl=[evzookeeper.ZOO_OPEN_ACL_UNSAFE]) + except zookeeper.NodeExistsException: + pass + + super(ZooKeeperDriver, self).__init__() + + def join(self, member_id, group, service=None): + """Join the given service with its group.""" + LOG.debug(_('ZooKeeperDriver: join new member %(id)s to the ' + '%(gr)s group, service=%(sr)s'), + {'id': member_id, 'gr': group, 'sr': service}) + member = self._memberships.get((group, member_id), None) + if member is None: + # the first time to join. Generate a new object + path = "%s/%s" % (CONF.zookeeper.sg_prefix, group) + try: + member = membership.Membership(self._session, path, member_id) + except RuntimeError: + LOG.exception(_("Unable to join. It is possible that either " + "another node exists with the same name, or " + "this node just restarted. 
We will try " + "again in a short while to make sure.")) + eventlet.sleep(CONF.zookeeper.sg_retry_interval) + member = membership.Membership(self._session, path, member_id) + self._memberships[(group, member_id)] = member + return FakeLoopingCall(self, member_id, group) + + def leave(self, member_id, group): + """Remove the given member from the service group.""" + LOG.debug(_('ZooKeeperDriver.leave: %(member)s from group %(group)s'), + {'member': member_id, 'group': group}) + try: + key = (group, member_id) + member = self._memberships[key] + member.leave() + del self._memberships[key] + except KeyError: + LOG.error(_('ZooKeeperDriver.leave: %(id)s has not joined to the ' + '%(gr)s group'), {'id': member_id, 'gr': group}) + + def is_up(self, service_ref): + group_id = service_ref['topic'] + member_id = service_ref['host'] + all_members = self.get_all(group_id) + return member_id in all_members + + def get_all(self, group_id): + """Return all members in a list, or a ServiceGroupUnavailable + exception. + """ + monitor = self._monitors.get(group_id, None) + if monitor is None: + path = "%s/%s" % (CONF.zookeeper.sg_prefix, group_id) + monitor = membership.MembershipMonitor(self._session, path) + self._monitors[group_id] = monitor + # Note(maoy): When initialized for the first time, it takes a + # while to retrieve all members from zookeeper. To prevent + # None to be returned, we sleep 5 sec max to wait for data to + # be ready. + for _retry in range(50): + eventlet.sleep(0.1) + all_members = monitor.get_all() + if all_members is not None: + return all_members + all_members = monitor.get_all() + if all_members is None: + raise exception.ServiceGroupUnavailable(driver="ZooKeeperDriver") + return all_members + + +class FakeLoopingCall(loopingcall.LoopingCallBase): + """The fake Looping Call implementation, created for backward + compatibility with a membership based on DB. + """ + def __init__(self, driver, host, group): + self._driver = driver + self._group = group + self._host = host + + def stop(self): + self._driver.leave(self._host, self._group) + + def start(self, interval, initial_delay=None): + pass + + def wait(self): + pass diff --git a/rack/test.py b/rack/test.py new file mode 100644 index 0000000..0fb1484 --- /dev/null +++ b/rack/test.py @@ -0,0 +1,285 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Base classes for our unit tests. + +Allows overriding of flags for use of fakes, and some black magic for +inline callbacks. 
+ +""" + +import eventlet +eventlet.monkey_patch(os=False) + +import copy +import gettext +import logging +import os +import shutil +import sys +import uuid + +import fixtures +from oslo.config import cfg +from oslo.messaging import conffixture as messaging_conffixture +import testtools + +from rack.db import migration +from rack.db.sqlalchemy import api as session +from rack.openstack.common.fixture import logging as log_fixture +from rack.openstack.common.fixture import moxstubout +from rack.openstack.common import log as oslo_logging +from rack.openstack.common import timeutils +from rack import paths +from rack import rpc +from rack import service +from rack.tests import conf_fixture +from rack.tests import policy_fixture + + +test_opts = [ + cfg.StrOpt('sqlite_clean_db', + default='clean.sqlite', + help='File name of clean sqlite db'), + ] + +CONF = cfg.CONF +CONF.register_opts(test_opts) +CONF.import_opt('connection', + 'rack.openstack.common.db.options', + group='database') +CONF.import_opt('sqlite_db', 'rack.openstack.common.db.options', + group='database') +CONF.set_override('use_stderr', False) + +oslo_logging.setup('rack') + +_DB_CACHE = None +_TRUE_VALUES = ('True', 'true', '1', 'yes') + + +class Database(fixtures.Fixture): + + def __init__(self, db_session, db_migrate, sql_connection, + sqlite_db, sqlite_clean_db): + self.sql_connection = sql_connection + self.sqlite_db = sqlite_db + self.sqlite_clean_db = sqlite_clean_db + + self.engine = db_session.get_engine() + self.engine.dispose() + conn = self.engine.connect() + if sql_connection == "sqlite://": + if db_migrate.db_version() > db_migrate.db_initial_version(): + return + else: + testdb = paths.state_path_rel(sqlite_db) + if os.path.exists(testdb): + return + db_migrate.db_sync() + if sql_connection == "sqlite://": + conn = self.engine.connect() + self._DB = "".join(line for line in conn.connection.iterdump()) + self.engine.dispose() + else: + cleandb = paths.state_path_rel(sqlite_clean_db) + shutil.copyfile(testdb, cleandb) + + def setUp(self): + super(Database, self).setUp() + + if self.sql_connection == "sqlite://": + conn = self.engine.connect() + conn.connection.executescript(self._DB) + self.addCleanup(self.engine.dispose) + else: + shutil.copyfile(paths.state_path_rel(self.sqlite_clean_db), + paths.state_path_rel(self.sqlite_db)) + + + +class ReplaceModule(fixtures.Fixture): + """Replace a module with a fake module.""" + + def __init__(self, name, new_value): + self.name = name + self.new_value = new_value + + def _restore(self, old_value): + sys.modules[self.name] = old_value + + def setUp(self): + super(ReplaceModule, self).setUp() + old_value = sys.modules.get(self.name) + sys.modules[self.name] = self.new_value + self.addCleanup(self._restore, old_value) + + +class ServiceFixture(fixtures.Fixture): + """Run a service as a test fixture.""" + + def __init__(self, name, host=None, **kwargs): + name = name + host = host and host or uuid.uuid4().hex + kwargs.setdefault('host', host) + kwargs.setdefault('binary', 'rack-%s' % name) + self.kwargs = kwargs + + def setUp(self): + super(ServiceFixture, self).setUp() + self.service = service.Service.create(**self.kwargs) + self.service.start() + self.addCleanup(self.service.kill) + + +class TranslationFixture(fixtures.Fixture): + """Use gettext NullTranslation objects in tests.""" + + def setUp(self): + super(TranslationFixture, self).setUp() + nulltrans = gettext.NullTranslations() + gettext_fixture = fixtures.MonkeyPatch('gettext.translation', + lambda *x, **y: nulltrans) 
+ self.gettext_patcher = self.useFixture(gettext_fixture) + + +class TestingException(Exception): + pass + + +class TestCase(testtools.TestCase): + """Test case base class for all unit tests. + + Due to the slowness of DB access, please consider deriving from + `NoDBTestCase` first. + """ + USES_DB = True + + # NOTE(rpodolyaka): this attribute can be overridden in subclasses in order + # to scale the global test timeout value set for each + # test case separately. Use 0 value to disable timeout. + TIMEOUT_SCALING_FACTOR = 1 + + def setUp(self): + """Run before each test method to initialize test environment.""" + super(TestCase, self).setUp() + test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0) + try: + test_timeout = int(test_timeout) + except ValueError: + # If timeout value is invalid do not set a timeout. + test_timeout = 0 + + if self.TIMEOUT_SCALING_FACTOR >= 0: + test_timeout *= self.TIMEOUT_SCALING_FACTOR + else: + raise ValueError('TIMEOUT_SCALING_FACTOR value must be >= 0') + + if test_timeout > 0: + self.useFixture(fixtures.Timeout(test_timeout, gentle=True)) + self.useFixture(fixtures.NestedTempfile()) + self.useFixture(fixtures.TempHomeDir()) + self.useFixture(TranslationFixture()) + self.useFixture(log_fixture.get_logging_handle_error_fixture()) + + if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES: + stdout = self.useFixture(fixtures.StringStream('stdout')).stream + self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout)) + if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES: + stderr = self.useFixture(fixtures.StringStream('stderr')).stream + self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr)) + + rpc.add_extra_exmods('rack.test') + self.addCleanup(rpc.clear_extra_exmods) + self.addCleanup(rpc.cleanup) + + fs = '%(levelname)s [%(name)s] %(message)s' + self.log_fixture = self.useFixture(fixtures.FakeLogger( + level=logging.DEBUG, + format=fs)) + self.useFixture(conf_fixture.ConfFixture(CONF)) + + self.messaging_conf = messaging_conffixture.ConfFixture(CONF) + self.messaging_conf.transport_driver = 'fake' + self.messaging_conf.response_timeout = 15 + self.useFixture(self.messaging_conf) + + rpc.init(CONF) + + if self.USES_DB: + global _DB_CACHE + if not _DB_CACHE: + _DB_CACHE = Database(session, migration, + sql_connection=CONF.database.connection, + sqlite_db=CONF.database.sqlite_db, + sqlite_clean_db=CONF.sqlite_clean_db) + + self.useFixture(_DB_CACHE) + + mox_fixture = self.useFixture(moxstubout.MoxStubout()) + self.mox = mox_fixture.mox + self.stubs = mox_fixture.stubs + self.addCleanup(self._clear_attrs) + self.useFixture(fixtures.EnvironmentVariable('http_proxy')) + self.policy = self.useFixture(policy_fixture.PolicyFixture()) + CONF.set_override('fatal_exception_format_errors', True) + + def _clear_attrs(self): + # Delete attributes that don't start with _ so they don't pin + # memory around unnecessarily for the duration of the test + # suite + for key in [k for k in self.__dict__.keys() if k[0] != '_']: + del self.__dict__[key] + + def flags(self, **kw): + """Override flag variables for a test.""" + group = kw.pop('group', None) + for k, v in kw.iteritems(): + CONF.set_override(k, v, group) + + def start_service(self, name, host=None, **kwargs): + svc = self.useFixture(ServiceFixture(name, host, **kwargs)) + return svc.service + + +class APICoverage(object): + + cover_api = None + + def test_api_methods(self): + self.assertTrue(self.cover_api is not None) + api_methods = [x for x in dir(self.cover_api) + if not x.startswith('_')] + test_methods = 
[x[5:] for x in dir(self) + if x.startswith('test_')] + self.assertThat( + test_methods, + testtools.matchers.ContainsAll(api_methods)) + + +class TimeOverride(fixtures.Fixture): + """Fixture to start and remove time override.""" + + def setUp(self): + super(TimeOverride, self).setUp() + timeutils.set_time_override() + self.addCleanup(timeutils.clear_time_override) + + +class NoDBTestCase(TestCase): + """`NoDBTestCase` differs from TestCase in that DB access is not supported. + This makes tests run significantly faster. If possible, all new tests + should derive from this class. + """ + USES_DB = False diff --git a/rack/tests/__init__.py b/rack/tests/__init__.py new file mode 100644 index 0000000..884ee32 --- /dev/null +++ b/rack/tests/__init__.py @@ -0,0 +1,27 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os +import sys + +if ('eventlet' in sys.modules and + os.environ.get('EVENTLET_NO_GREENDNS', '').lower() != 'yes'): + raise ImportError('eventlet imported before rack/cmd/__init__ ' + '(env var set to %s)' + % os.environ.get('EVENTLET_NO_GREENDNS')) + +os.environ['EVENTLET_NO_GREENDNS'] = 'yes' + +import eventlet + +eventlet.monkey_patch(os=False) diff --git a/rack/tests/api/__init__.py b/rack/tests/api/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/rack/tests/api/fakes.py b/rack/tests/api/fakes.py new file mode 100644 index 0000000..39ba089 --- /dev/null +++ b/rack/tests/api/fakes.py @@ -0,0 +1,42 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
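rack/test.py above gives every unit test the same scaffolding: NoDBTestCase for tests that never touch the database, flags() for per-test configuration overrides that the fixtures undo between tests, and small helpers such as TimeOverride. A minimal sketch of a test built on those helpers follows; it is illustrative only, not part of this patch, and assumes that the report_interval option registered when rack.service is imported is available on the global CONF.

    from rack.openstack.common import timeutils
    from rack import test


    class ExampleTestCase(test.NoDBTestCase):

        def test_flag_override_is_scoped_to_this_test(self):
            # flags() forwards to CONF.set_override(); the fixtures set up
            # by the base class undo the override when the test finishes.
            self.flags(report_interval=30)
            self.assertEqual(30, test.CONF.report_interval)

        def test_frozen_time(self):
            # TimeOverride pins timeutils.utcnow() until its cleanup runs.
            self.useFixture(test.TimeOverride())
            first = timeutils.utcnow()
            second = timeutils.utcnow()
            self.assertEqual(first, second)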
+import paste.urlmap + +import rack.api +from rack.api import auth +from rack.api import v1 +from rack.api import versions +from rack import context +from rack.resourceoperator import rpcapi as operator_rpcapi +from rack.scheduler import rpcapi as scheduler_rpcapi + + +def wsgi_app(inner_app_v1=None, fake_auth_context=None, + use_no_auth=False): + if not inner_app_v1: + inner_app_v1 = v1.APIRouter() + + if use_no_auth: + api_v1 = rack.api.FaultWrapper(auth.NoAuthMiddleware(inner_app_v1)) + else: + if fake_auth_context is not None: + ctxt = fake_auth_context + else: + ctxt = context.RequestContext('fake', 'fake', auth_token=True) + api_v1 = rack.api.FaultWrapper(auth.InjectContext(ctxt, inner_app_v1)) + + mapper = paste.urlmap.URLMap() + mapper['/v1'] = api_v1 + mapper['/'] = rack.api.FaultWrapper(versions.Versions()) + return mapper diff --git a/rack/tests/api/v1/__init__.py b/rack/tests/api/v1/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/rack/tests/api/v1/test_groups.py b/rack/tests/api/v1/test_groups.py new file mode 100644 index 0000000..362f2ea --- /dev/null +++ b/rack/tests/api/v1/test_groups.py @@ -0,0 +1,597 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from rack.api.v1 import groups +from rack import db +from rack import exception +from rack.openstack.common import jsonutils +from rack import test +from rack.tests.api import fakes + +import copy +import uuid +import webob + +GID = str(uuid.uuid4()) + +FAKE_GROUPS = { + "groups": [ + { + "gid": "gid1", + "user_id": "user_id1", + "project_id": "fake", + "display_name": "fake", + "display_description": "fake", + "status": "ACTIVE" + }, + { + "gid": "gid2", + "user_id": "user_id1", + "project_id": "fake", + "display_name": "fake", + "display_description": "fake", + "status": "ACTIVE" + }, + { + "gid": "gid3", + "user_id": "user_id2", + "project_id": "fake", + "display_name": "fake", + "display_description": "fake", + "status": "ACTIVE" + } + ] +} + + +def fake_create(context, kwargs): + return {"gid": GID, + "user_id": context.user_id, + "project_id": context.project_id, + "display_name": kwargs["display_name"], + "display_description": kwargs["display_description"], + "status": "ACTIVE"} + + +def fake_update(context, kwargs): + return { + "gid": GID, + "user_id": context.user_id, + "project_id": context.project_id, + "display_name": "test", + "display_description": "test", + "status": "ACTIVE" + } + +def fake_delete(context, kwargs): + return { + } + +def fake_not_group_data_exists(context, kwargs): + return {"dummy-key" : "dummy-data"} + +def fake_not_group_data_not_exists(context, kwargs): + return {} + +def fake_raise_exception(context, kwargs): + raise Exception() + +def raise_group_not_found(context, kwargs): + raise exception.GroupNotFound(gid=GID) + +def fake_group_get_all(context, filters): + if not filters: + return copy.deepcopy(FAKE_GROUPS["groups"]) + else: + return [ + {"gid": "fake", + "user_id": "fake", + "project_id": 
filters["project_id"], + "display_name": filters["display_name"], + "display_description": "fake", + "status": filters["status"]} + ] + + +def fake_group_get_by_gid(context, gid): + return { + "gid": gid, + "user_id": "a4362182a2ac425c9b0b0826ad187d31", + "project_id": "a43621849823764c9b0b0826ad187d31t", + "display_name": "my_group", + "display_description": "This is my group.", + "status": "ACTIVE" + } + + +def get_request(url, method, body=None): + req = webob.Request.blank(url) + req.headers['Content-Type'] = 'application/json' + req.method = method + if body is not None: + req.body = jsonutils.dumps(body) + return req + + +class GroupsTest(test.NoDBTestCase): + + def setUp(self): + super(GroupsTest, self).setUp() + self.stubs.Set(db, "group_create", fake_create) + self.stubs.Set(db, "group_get_all", fake_group_get_all) + self.stubs.Set(db, "group_get_by_gid", fake_group_get_by_gid) + self.stubs.Set(db, "group_update", fake_update) + self.stubs.Set(db, "group_delete", fake_delete) + self.controller = groups.Controller() + self.app = fakes.wsgi_app() + + def test_index(self): + url = '/v1/groups' + req = get_request(url, 'GET') + res = req.get_response(self.app) + body = jsonutils.loads(res.body) + expected = copy.deepcopy(FAKE_GROUPS) + for group in expected["groups"]: + group["name"] = group.pop("display_name") + group["description"] = group.pop("display_description") + self.assertEqual(res.status_code, 200) + self.assertEqual(body, expected) + + def test_index_filters(self): + url = '/v1/groups?project_id=PID&name=NAME&status=STATUS' + + req = get_request(url, 'GET') + res = req.get_response(self.app) + body = jsonutils.loads(res.body) + + expected = {"groups": [ + {"gid": "fake", + "user_id": "fake", + "project_id": "PID", + "name": "NAME", + "description": "fake", + "status": "STATUS"} + ]} + self.assertEqual(res.status_code, 200) + self.assertEqual(body, expected) + + def test_show(self): + url = '/v1/groups/00000000-0000-0000-0000-000000000010' + req = get_request(url, 'GET') + res = req.get_response(self.app) + body = jsonutils.loads(res.body) + expected = {"group": { + "gid": "00000000-0000-0000-0000-000000000010", + "user_id": "a4362182a2ac425c9b0b0826ad187d31", + "project_id": "a43621849823764c9b0b0826ad187d31t", + "name": "my_group", + "description": "This is my group.", + "status": "ACTIVE" + }} + + self.assertEqual(res.status_code, 200) + self.assertEqual(body, expected) + + def test_show_not_found_exception(self): + self.stubs.Set(db, "group_get_by_gid", + raise_group_not_found) + url = '/v1/groups/' + GID + req = get_request(url, 'GET') + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + self.assertRaises( + webob.exc.HTTPNotFound, self.controller.show, req, GID) + + def test_show_gid_is_not_uuid_format(self): + gid = "abcdefgid" + url = '/v1/groups/' + gid + req = get_request(url, 'GET') + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + self.assertRaises( + webob.exc.HTTPNotFound, self.controller.show, req, gid) + + def test_create(self): + name = "test_group" + description = "This is test group." 
+ request_body = { + "group": { + "name": name, + "description": description, + } + } + expected = { + "group": { + "gid": GID, + "user_id": "fake", + "project_id": "fake", + "name": name, + "description": description, + "status": "ACTIVE" + } + } + + url = '/v1/groups' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + body = jsonutils.loads(res.body) + self.assertEqual(res.status_code, 201) + for key in expected["group"]: + self.assertEqual(body["group"][key], expected["group"][key]) + + def test_create_group_name_is_whitespace(self): + request_body = { + "group": { + "name": " ", + "description": "This is test group", + } + } + + url = '/v1/groups' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_group_name_with_leading_trailing_whitespace(self): + request_body = { + "group": { + "name": " test_group ", + "description": "This is test group" + } + } + expected = { + "group": { + "gid": GID, + "user_id": "fake", + "project_id": "fake", + "name": "test_group", + "description": "This is test group", + "status": "ACTIVE" + } + } + + url = '/v1/groups' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + body = jsonutils.loads(res.body) + self.assertEqual(res.status_code, 201) + for key in expected["group"]: + self.assertEqual(body["group"][key], expected["group"][key]) + + def test_create_without_group_name(self): + request_body = { + "group": { + "description": "This is test group", + } + } + + url = '/v1/groups' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_without_group_description(self): + request_body = { + "group": { + "name": "test_group", + } + } + expected = { + "group": { + "gid": GID, + "user_id": "fake", + "project_id": "fake", + "name": "test_group", + "status": "ACTIVE" + } + } + + url = '/v1/groups' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + body = jsonutils.loads(res.body) + self.assertEqual(res.status_code, 201) + for key in expected["group"]: + self.assertEqual(body["group"][key], expected["group"][key]) + + def test_create_empty_body(self): + request_body = {"group": {}} + + url = '/v1/groups' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_no_body(self): + request_body = {} + + url = '/v1/groups' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_invalid_format_body(self): + request_body = [] + + url = '/v1/groups' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_check_group_name_length(self): + MAX_LENGTH = 255 + request_body = { + "group": { + "name": "a" * (MAX_LENGTH + 1), + "description": "This is test group" + } + } + + url = '/v1/groups' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_group_description_length_zero(self): + request_body = { + "group": { + "name": "test_group", + "description": "" + } + } + expected = { + "group": { + "gid": GID, + "user_id": "fake", + "project_id": "fake", + "name": "test_group", + "description": "", + "status": "ACTIVE" + } + } + + url = '/v1/groups' + req = 
get_request(url, 'POST', request_body) + res = req.get_response(self.app) + body = jsonutils.loads(res.body) + self.assertEqual(res.status_code, 201) + for key in expected["group"]: + self.assertEqual(body["group"][key], expected["group"][key]) + + def test_create_check_group_description_length(self): + MAX_LENGTH = 255 + request_body = { + "group": { + "name": "test_group", + "description": "a" * (MAX_LENGTH + 1) + } + } + + url = '/v1/groups' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_update(self): + request_body = { + "group": { + "name": "My_Group_updated", + "description": "This is my group updated.", + } + } + expected = { + "group": { + "gid": GID, + "user_id": "fake", + "project_id": "fake", + "name": "test", + "description": "test", + "status": "ACTIVE" + } + } + + url = '/v1/groups/' + GID + req = get_request(url, 'PUT', request_body) + res = req.get_response(self.app) + body = jsonutils.loads(res.body) + self.assertEqual(res.status_code, 200) + for key in request_body["group"]: + self.assertEqual(body["group"][key], expected["group"][key]) + + def test_update_allow_group_name_none(self): + request_body = { + "group": { + "description": "This is test group" + } + } + + url = '/v1/groups/' + GID + req = get_request(url, 'PUT', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 200) + + def test_update_allow_group_description_none(self): + request_body = { + "group": { + "name": "my_group", + } + } + + url = '/v1/groups/' + GID + req = get_request(url, 'PUT', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 200) + + def test_update_allow_group_description_blank(self): + request_body = { + "group": { + "name": "my_group", + "description": "", + } + } + + url = '/v1/groups/' + GID + req = get_request(url, 'PUT', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 200) + + def test_update_invalid_gid(self): + request_body = { + "group": { + "description": "This is test group" + } + } + + url = '/v1/groups/' + GID + "err" + req = get_request(url, 'PUT', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_update_empty_body(self): + request_body = {"group": {}} + + url = '/v1/groups/' + GID + req = get_request(url, 'PUT', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_update_no_body(self): + request_body = {} + + url = '/v1/groups/' + GID + req = get_request(url, 'PUT', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_update_invalid_format_body(self): + request_body = [] + + url = '/v1/groups/' + GID + req = get_request(url, 'PUT', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_update_group_name_blank(self): + request_body = { + "group": { + "name": "", + } + } + + url = '/v1/groups/' + GID + req = get_request(url, 'PUT', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_update_check_group_name_length(self): + MAX_LENGTH = 255 + request_body = { + "group": { + "name": "a" * (MAX_LENGTH + 1), + "description": "This is test group" + } + } + + url = '/v1/groups/' + GID + req = get_request(url, 'PUT', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def 
test_update_check_group_description_length(self): + MAX_LENGTH = 255 + request_body = { + "group": { + "name": "my_group", + "description": "a" * (MAX_LENGTH + 1) + } + } + + url = '/v1/groups/' + GID + req = get_request(url, 'PUT', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_update_group_not_found_on_db(self): + self.stubs.Set(db, "group_update", raise_group_not_found) + request_body = { + "group": { + "description": "This is test group" + } + } + + url = '/v1/groups/' + GID + req = get_request(url, 'PUT', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + self.assertRaises( + webob.exc.HTTPNotFound, self.controller.update, req, request_body, GID) + + def test_delete_invalid_format_gid(self): + url = '/v1/groups/' + GID + "err" + req = get_request(url, 'DELETE') + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + body = jsonutils.loads(res.body) + print(body) + + def test_delete(self): + url = '/v1/groups/'+GID + req = get_request(url, 'DELETE') + self.stubs.Set(db, "keypair_get_all", fake_not_group_data_not_exists) + self.stubs.Set(db, "securitygroup_get_all", fake_not_group_data_not_exists) + self.stubs.Set(db, "network_get_all", fake_not_group_data_not_exists) + self.stubs.Set(db, "process_get_all", fake_not_group_data_not_exists) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 204) + + def test_delete_group_inuse_keypair(self): + url = '/v1/groups/' + GID + req = get_request(url, 'DELETE') + self.stubs.Set(db, "keypair_get_all", fake_not_group_data_exists) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 409) + + def test_delete_group_inuse_securitygroup(self): + url = '/v1/groups/' + GID + req = get_request(url, 'DELETE') + self.stubs.Set(db, "keypair_get_all", fake_not_group_data_not_exists) + self.stubs.Set(db, "securitygroup_get_all", fake_not_group_data_exists) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 409) + + def test_delete_group_inuse_network(self): + url = '/v1/groups/' + GID + req = get_request(url, 'DELETE') + self.stubs.Set(db, "keypair_get_all", fake_not_group_data_not_exists) + self.stubs.Set(db, "securitygroup_get_all", fake_not_group_data_not_exists) + self.stubs.Set(db, "network_get_all", fake_not_group_data_exists) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 409) + + def test_delete_group_inuse_process(self): + url = '/v1/groups/' + GID + req = get_request(url, 'DELETE') + self.stubs.Set(db, "keypair_get_all", fake_not_group_data_not_exists) + self.stubs.Set(db, "securitygroup_get_all", fake_not_group_data_not_exists) + self.stubs.Set(db, "network_get_all", fake_not_group_data_not_exists) + self.stubs.Set(db, "process_get_all", fake_not_group_data_exists) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 409) + + def test_delete_exception(self): + url = '/v1/groups/' + GID + req = get_request(url, 'DELETE') + self.stubs.Set(db, "keypair_get_all", fake_raise_exception) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 500) diff --git a/rack/tests/api/v1/test_keypairs.py b/rack/tests/api/v1/test_keypairs.py new file mode 100644 index 0000000..e598afb --- /dev/null +++ b/rack/tests/api/v1/test_keypairs.py @@ -0,0 +1,722 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import re + +import mox +from mox import IsA + +from rack.api.v1 import keypairs +from rack import context +from rack import db +from rack import exception +from rack.openstack.common import jsonutils +from rack.scheduler import rpcapi as scheduler_rpcapi +from rack.resourceoperator import rpcapi as operator_rpcapi +from rack import test +from rack.tests.api import fakes + +import uuid +import webob + +GID = unicode(uuid.uuid4()) +KEYPAIR_ID = unicode(uuid.uuid4()) +PRIVATE_KEY = ("-----BEGIN RSA PRIVATE KEY-----\n" + "MIIEoAIBAAKCAQEA6W34Ak32uxp7Oh0rh1mCQclkw+NeqchAOhyO/rcphFt280D9\n" + "YXxdUa43i51IDS9VpyFFd10Cv4ccynTPnky82CpGcuXCzaACzI/FHhmBeXTrFoXm\n" + "682b/8kXVQfCVfSjnvChxeeATjPu9GQkNrgyYyoubHxrrW7fTaRLEz/Np9CvCq/F\n" + "PJcsx7FwD0adFfmnulbZpplunqMGKX2nYXbDlLi7Ykjd3KbH1PRJuu+sPYDz3GmZ\n" + "4Z0naojOUDcajuMckN8RzNblBrksH8g6NDauoX5hQa9dyd1q36403NW9tcE6ZwNp\n" + "1GYCnN7/YgI/ugHo30ptpBvGw1zuY5/+FkU7SQIBIwKCAQA8BlW3cyIwHMCZ6j5k\n" + "ofzsWFu9V7lBmeShOosrji8/Srgv7CPl3iaf+ZlBKHGc/YsNuBktUm5rw6hRUTyz\n" + "rVUhpHiD8fBDgOrG4yQPDd93AM68phbO67pmWEfUCU86rJ8aPeB0t98qDVqz3zyD\n" + "GWwK3vX+o6ao8J/SIu67zpP381d/ZigDsq+yqhtPpz04YJ2W0w67NV6XSPOV1AX0\n" + "YLniHMwfbSTdwJ/wVWoooIgbTo7ldPuBsKUwNIVW8H9tmapVdyQxAS9JAkr1Y2si\n" + "xKURN4Iez2oyCFv5+P1emhoptgECr49kpOBAvhRfWWkumgR1azqynzTjSnpQVO62\n" + "vQr7AoGBAPkYWJX0tFNlqIWw4tcHtcPHJkRwvLdPUfM6Q0b6+YctKBmLoNJWBiXr\n" + "39wiYnftSdJO+L96HAG38RrmeCfafz19EDPVXepAUYZDwnY1HGx7ZqbiPwxYMN4C\n" + "+Wg3LzuSh7d5fe409+TCtX4YqSVFQd9gl8Ml3sKVOTxeaDROw6hFAoGBAO/mdJOr\n" + "SGcAj9V99df6IX8abZTPm2PmirT95WWwIYX4PRY//5iaCN6XyEKIx5TJk9lmcQhS\n" + "tb++PTsXpea01WUcxqaOO3vG7PQhvAbpq8A4eMBZZiY9UyctCPNSMscPPNRU2r/C\n" + "tAsXRk6BNkiGofgn2MY5YBoPkEgiJmJWMKE1AoGAeP0yV3bbPnM0mLUAdxJfmZs+\n" + "eQOO3LF/k2VxInnm6eK7tKLntp7PyUauj35qV4HiBxBqMR4Nmm9JOPOZcnFxAJvU\n" + "q3ZDjwlMK0V7tcIGfdWJoYPVewZDnwjCSI/VHO9mfbAJ91uOWStfd8LV0EY18Cea\n" + "K5YNHK7hSTUrTJtJFzcCgYB7YJO5qIuir9Txc/rG2Gj/ie82lqevuGSXmISaslpi\n" + "J+Tm3xW8MfXu0bdyrL5pxsEQuFdjXbyOfxgtBNj6Tl8eDsyQK+QTxWPrRIyV10Ji\n" + "2zbJUoxOLirDsMLGR4fUFncOHQLJBQwi9gbmi5hCjmHtVlI6DuD3dbfqlThP1I4J\n" + "wwKBgHfbOPVCgcJA3733J+dBC8gLt5QT2fCZ2N7PtaHcsSrW/B9VlGP+tviEC59U\n" + "bmpOLADzAto1MZdRDr8uXByZ8/eI37Txn6YchMVp43uL2+WaTdn9GBtOBpWJ0Pqi\n" + "x3HBmILbvIEzB2BX11/PDNGRMNcCy7edvnFMCxeAiW7DJqCb\n" + "-----END RSA PRIVATE KEY-----\n") + +KEYPAIR_ID1 = unicode(uuid.uuid4()) +KEYPAIR_ID2 = unicode(uuid.uuid4()) + +def _base_keypair_get_response(context): + return [ + { + "keypair_id": KEYPAIR_ID1, + "nova_keypair_id": "fake_key1", + "gid": GID, + "user_id": context.user_id, + "project_id": context.project_id, + "display_name": "fake_key1", + "private_key": PRIVATE_KEY, + "is_default": False, + "status": "ACTIVE" + }, + { + "keypair_id": KEYPAIR_ID2, + "nova_keypair_id": "fake_key2", + "gid": GID, + "user_id": context.user_id, + "project_id": context.project_id, + "display_name": "fake_key2", + "private_key": PRIVATE_KEY, + "is_default": False, + "status": "ACTIVE" + }, + ] + + +def 
fake_group_get_by_id(context, gid): + pass + + +def fake_keypair_get_all(context, gid, filters=None): + return _base_keypair_get_response(context) + + +def fake_keypair_get_by_keypair_id(context, gid, keypair_id): + keypair_list = _base_keypair_get_response(context) + for keypair in keypair_list: + if keypair["keypair_id"] == keypair_id: + return keypair + raise exception.KeypairNotFound() + + +def fake_create(context, kwargs): + return { + "keypair_id": KEYPAIR_ID, + "nova_keypair_id": kwargs.get("nova_keypair_id"), + "gid": GID, + "user_id": context.user_id, + "project_id": context.project_id, + "display_name": kwargs.get("display_name"), + "is_default": kwargs.get("is_default"), + "status": "BUILDING" + } + + +def fake_update(context, gid, keypair_id, kwargs): + return { + "keypair_id": keypair_id, + "nova_keypair_id": "test_keypair", + "gid": GID, + "user_id": context.user_id, + "project_id": context.project_id, + "display_name": "test_keypair", + "private_key": PRIVATE_KEY, + "is_default": kwargs.get("is_default"), + "status": "ACTIVE" + } + + +def fake_delete(context, gid, keypair_id): + return { + "keypair_id": keypair_id, + "nova_keypair_id": "test_keypair", + "gid": GID, + "user_id": context.user_id, + "project_id": context.project_id, + "display_name": "test_keypair", + "private_key": PRIVATE_KEY, + "is_default": False, + "status": "DELETING" + } + + +def get_request(url, method, body=None): + req = webob.Request.blank(url) + req.headers['Content-Type'] = 'application/json' + req.method = method + if body is not None: + req.body = jsonutils.dumps(body) + return req + + +class KeypairsTest(test.NoDBTestCase): + + def setUp(self): + super(KeypairsTest, self).setUp() + self.stubs.Set(db, "group_get_by_gid", fake_group_get_by_id) + self.stubs.Set(db, "keypair_get_all", fake_keypair_get_all) + self.stubs.Set(db, "keypair_get_by_keypair_id", fake_keypair_get_by_keypair_id) + self.stubs.Set(db, "keypair_create", fake_create) + self.stubs.Set(db, "keypair_update", fake_update) + self.stubs.Set(db, "keypair_delete", fake_delete) + self.mox.StubOutWithMock(scheduler_rpcapi.SchedulerAPI, "select_destinations") + self.mox.StubOutWithMock(operator_rpcapi.ResourceOperatorAPI, "keypair_create") + self.mox.StubOutWithMock(operator_rpcapi.ResourceOperatorAPI, "keypair_delete") + self.mox.StubOutWithMock(db, "process_get_all") + self.app = fakes.wsgi_app() + + def test_index(self): + url = "/v1/groups/" + GID + "/keypairs" + req = get_request(url, 'GET') + res = req.get_response(self.app) + body = jsonutils.loads(res.body) + expected = [ + { + "keypair_id": KEYPAIR_ID1, + "nova_keypair_id": "fake_key1", + "gid": GID, + "user_id": "fake", + "project_id": "fake", + "name": "fake_key1", + "private_key": PRIVATE_KEY, + "is_default": False, + "status": "ACTIVE" + }, + { + "keypair_id": KEYPAIR_ID2, + "nova_keypair_id": "fake_key2", + "gid": GID, + "user_id": "fake", + "project_id": "fake", + "name": "fake_key2", + "private_key": PRIVATE_KEY, + "is_default": False, + "status": "ACTIVE" + }, + ] + self.assertEqual(res.status_code, 200) + self.assertEqual(body["keypairs"], expected) + + def test_index_with_param(self): + param = \ + "?keypair_id=" + KEYPAIR_ID + \ + "?nova_keypair_id=" + KEYPAIR_ID + \ + "?status=" + KEYPAIR_ID + \ + "?is_default=" + KEYPAIR_ID + \ + "&name=test" + url = "/v1/groups/" + GID + "/keypairs" + param + req = get_request(url, 'GET') + res = req.get_response(self.app) + self.assertEqual(res.status_code, 200) + + def test_index_invalid_format_gid(self): + url = "/v1/groups/" + 
"aaaaa" + "/keypairs" + req = get_request(url, 'GET') + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_show(self): + url = "/v1/groups/" + GID + "/keypairs/" + KEYPAIR_ID1 + req = get_request(url, 'GET') + res = req.get_response(self.app) + body = jsonutils.loads(res.body) + expected = { + "keypair_id": KEYPAIR_ID1, + "nova_keypair_id": "fake_key1", + "gid": GID, + "user_id": "fake", + "project_id": "fake", + "name": "fake_key1", + "private_key": PRIVATE_KEY, + "is_default": False, + "status": "ACTIVE" + } + self.assertEqual(res.status_code, 200) + self.assertEqual(body["keypair"], expected) + + def test_show_invalid_format_gid(self): + url = "/v1/groups/" + "aaaaa" + "/keypairs/" + KEYPAIR_ID1 + req = get_request(url, 'GET') + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_show_invalid_format_keypair_id(self): + url = "/v1/groups/" + GID + "/keypairs/" + "aaaaa" + req = get_request(url, 'GET') + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_show_keypair_not_found(self): + self.mox.StubOutWithMock(db, "keypair_get_by_keypair_id") + db.keypair_get_by_keypair_id(IsA(context.RequestContext), GID, KEYPAIR_ID)\ + .AndRaise(exception.KeypairNotFound(keypair_id=KEYPAIR_ID)) + self.mox.ReplayAll() + url = "/v1/groups/" + GID + "/keypairs/" + KEYPAIR_ID + req = get_request(url, 'GET') + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_create(self): + name = "test_key" + request_body = { + "keypair": { + "name": name, + "is_default": "true", + } + } + + scheduler_rpcapi.SchedulerAPI.select_destinations( + IsA(context.RequestContext), request_spec={}, filter_properties={})\ + .AndReturn({"host": "fake_host"}) + operator_rpcapi.ResourceOperatorAPI.keypair_create( + IsA(context.RequestContext), "fake_host", gid=GID, keypair_id=IsA(unicode), name=name) + self.mox.ReplayAll() + + expected = { + "keypair": { + "gid": GID, + "user_id": "fake", + "project_id": "fake", + "name": name, + "is_default": True, + "status": "BUILDING" + } + } + + url = '/v1/groups/' + GID + '/keypairs' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + body = jsonutils.loads(res.body) + self.assertEqual(res.status_code, 202) + for key in expected["keypair"]: + self.assertEqual(body["keypair"][key], expected["keypair"][key]) + + def test_create_raise_exception_by_db_keypair_create(self): + self.mox.StubOutWithMock(db, "group_get_by_gid") + db.group_get_by_gid(IsA(context.RequestContext), GID)\ + .AndRaise(exception.GroupNotFound(gid=GID)) + self.mox.ReplayAll() + + request_body = { + "keypair": { + "name": "test_key", + } + } + url = '/v1/groups/' + GID + '/keypairs' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_create_raise_exception_by_scheduler_rpcapi(self): + self.mox.StubOutWithMock(db, "keypair_update") + db.keypair_update(IsA(context.RequestContext), GID, IsA(str), {"status": "ERROR"}) + scheduler_rpcapi.SchedulerAPI.select_destinations( + IsA(context.RequestContext), request_spec={}, filter_properties={})\ + .AndRaise(Exception()) + self.mox.ReplayAll() + + request_body = { + "keypair": { + "name": "test_key", + } + } + + url = '/v1/groups/' + GID + '/keypairs' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 500) + + def test_create_raise_exception_by_operator_rpcapi(self): + 
name = "test_key" + request_body = { + "keypair": { + "name": name, + } + } + self.mox.StubOutWithMock(db, "keypair_update") + db.keypair_update(IsA(context.RequestContext), GID, IsA(str), {"status": "ERROR"}) + scheduler_rpcapi.SchedulerAPI.select_destinations( + IsA(context.RequestContext), request_spec={}, filter_properties={})\ + .AndReturn({"host": "fake_host"}) + operator_rpcapi.ResourceOperatorAPI.keypair_create( + IsA(context.RequestContext), "fake_host", gid=GID, keypair_id=IsA(unicode), name=name)\ + .AndRaise(Exception()) + self.mox.ReplayAll() + + url = '/v1/groups/' + GID + '/keypairs' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 500) + + def test_create_invalid_format_gid(self): + request_body = { + "keypair": { + "name": "test_keypair", + } + } + + url = '/v1/groups/' + 'aaaaaaa' + '/keypairs' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_create_keypair_name_is_whitespace(self): + request_body = { + "keypair": { + "name": " ", + } + } + + url = '/v1/groups/' + GID + '/keypairs' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_keypair_name_with_leading_trailing_whitespace(self): + request_body = { + "keypair": { + "name": " test_keypair ", + } + } + + scheduler_rpcapi.SchedulerAPI.select_destinations( + IsA(context.RequestContext), request_spec={}, filter_properties={})\ + .AndReturn({"host": "fake_host"}) + operator_rpcapi.ResourceOperatorAPI.keypair_create( + IsA(context.RequestContext), "fake_host", gid=GID, keypair_id=IsA(unicode), name="test_keypair") + self.mox.ReplayAll() + + expected = { + "keypair": { + "keypair_id": KEYPAIR_ID, + "gid": GID, + "user_id": "fake", + "project_id": "fake", + "name": "test_keypair", + "is_default": False, + "status": "BUILDING" + } + } + + url = '/v1/groups/' + GID + '/keypairs' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + body = jsonutils.loads(res.body) + self.assertEqual(res.status_code, 202) + for key in expected["keypair"]: + self.assertEqual(body["keypair"][key], expected["keypair"][key]) + + def test_create_without_name(self): + scheduler_rpcapi.SchedulerAPI.select_destinations( + IsA(context.RequestContext), request_spec={}, filter_properties={})\ + .AndReturn({"host": "fake_host"}) + operator_rpcapi.ResourceOperatorAPI.keypair_create( + IsA(context.RequestContext), "fake_host", gid=GID, keypair_id=IsA(unicode), name=IsA(unicode)) + self.mox.ReplayAll() + + request_body = { + "keypair": { + "is_default": "true", + } + } + expected = { + "keypair": { + "keypair_id": KEYPAIR_ID, + "gid": GID, + "user_id": "fake", + "project_id": "fake", + "is_default": True, + "status": "BUILDING" + } + } + + url = '/v1/groups/' + GID + '/keypairs' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + body = jsonutils.loads(res.body) + self.assertEqual(res.status_code, 202) + for key in expected["keypair"]: + self.assertEqual(body["keypair"][key], expected["keypair"][key]) + regex = re.compile("keypair\-[a-z0-9]{8}\-[a-z0-9]{4}\-[a-z0-9]{4}\-[a-z0-9]{4}\-[a-z0-9]{12}") + self.assertTrue(regex.match(body["keypair"]["name"])) + + def test_create_without_is_default(self): + name = "test_keypair" + request_body = { + "keypair": { + "name": name, + } + } + + scheduler_rpcapi.SchedulerAPI.select_destinations( + 
IsA(context.RequestContext), request_spec={}, filter_properties={})\ + .AndReturn({"host": "fake_host"}) + operator_rpcapi.ResourceOperatorAPI.keypair_create( + IsA(context.RequestContext), "fake_host", gid=GID, keypair_id=IsA(unicode), name=name) + self.mox.ReplayAll() + + expected = { + "keypair": { + "keypair_id": KEYPAIR_ID, + "gid": GID, + "user_id": "fake", + "project_id": "fake", + "name": "test_keypair", + "is_default": False, + "status": "BUILDING" + } + } + + url = '/v1/groups/' + GID + '/keypairs' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + body = jsonutils.loads(res.body) + self.assertEqual(res.status_code, 202) + for key in expected["keypair"]: + self.assertEqual(body["keypair"][key], expected["keypair"][key]) + + def test_create_empty_body(self): + scheduler_rpcapi.SchedulerAPI.select_destinations( + IsA(context.RequestContext), request_spec={}, filter_properties={})\ + .AndReturn({"host": "fake_host"}) + operator_rpcapi.ResourceOperatorAPI.keypair_create( + IsA(context.RequestContext), "fake_host", gid=GID, keypair_id=IsA(unicode), name=IsA(unicode)) + self.mox.ReplayAll() + + request_body = {"keypair": {}} + url = '/v1/groups/' + GID + '/keypairs' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 202) + + def test_create_no_body(self): + request_body = {} + + url = '/v1/groups/' + GID + '/keypairs' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_invalid_format_body(self): + request_body = [] + + url = '/v1/groups/' + GID + '/keypairs' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_check_keypair_name_length(self): + MAX_LENGTH = 255 + request_body = { + "keypair": { + "name": "a" * (MAX_LENGTH + 1), + } + } + + url ='/v1/groups/' + GID + '/keypairs' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_invalid_is_default(self): + request_body = { + "keypair": { + "name": "test_keypair", + "is_default": "aaa" + } + } + + url ='/v1/groups/' + GID + '/keypairs' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_update(self): + request_body = { + "keypair": { + "is_default": "true" + } + } + expected = { + "keypair": { + "keypair_id": KEYPAIR_ID, + "gid": GID, + "user_id": "fake", + "project_id": "fake", + "nova_keypair_id": "test_keypair", + "name": "test_keypair", + "private_key": PRIVATE_KEY, + "is_default": True, + "status": "ACTIVE" + } + } + + url = "/v1/groups/" + GID + "/keypairs/" + KEYPAIR_ID + req = get_request(url, 'PUT', request_body) + res = req.get_response(self.app) + body = jsonutils.loads(res.body) + self.assertEqual(res.status_code, 200) + for key in request_body["keypair"]: + self.assertEqual(body["keypair"][key], expected["keypair"][key]) + + def test_update_invalid_format_gid(self): + request_body = { + "keypair": { + "is_default": "true", + } + } + + url = "/v1/groups/" + "aaaaaaa" + "/keypairs/" + KEYPAIR_ID + req = get_request(url, "PUT", request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_update_invalid_format_keypair_id(self): + request_body = { + "keypair": { + "is_default": "true", + } + } + + url = "/v1/groups/" + GID + "/keypairs/" + 
"aaaaa" + req = get_request(url, "PUT", request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_update_invalid_format_is_default(self): + request_body = { + "keypair": { + "is_default": "aaa", + } + } + + url = "/v1/groups/" + GID + "/keypairs/" + KEYPAIR_ID + req = get_request(url, "PUT", request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_update_without_is_default(self): + request_body = { + "keypair": { + "name": "aaa", + } + } + + url = "/v1/groups/" + GID + "/keypairs/" + KEYPAIR_ID + req = get_request(url, "PUT", request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_update_empty_body(self): + request_body = {"keypair": {}} + url = "/v1/groups/" + GID + "/keypairs/" + KEYPAIR_ID + req = get_request(url, "PUT", request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_update_no_body(self): + request_body = {} + url = "/v1/groups/" + GID + "/keypairs/" + KEYPAIR_ID + req = get_request(url, "PUT", request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_update_invalid_body(self): + request_body = [] + url = "/v1/groups/" + GID + "/keypairs/" + KEYPAIR_ID + req = get_request(url, "PUT", request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_delete(self): + db.process_get_all(IsA(context.RequestContext), + GID, + filters={"keypair_id": KEYPAIR_ID})\ + .AndReturn([]) + scheduler_rpcapi.SchedulerAPI.select_destinations( + IsA(context.RequestContext), request_spec={}, filter_properties={})\ + .AndReturn({"host": "fake_host"}) + operator_rpcapi.ResourceOperatorAPI.keypair_delete( + IsA(context.RequestContext), "fake_host", nova_keypair_id=IsA(str)) + self.mox.ReplayAll() + + url = "/v1/groups/" + GID + "/keypairs/" + KEYPAIR_ID + req = get_request(url, "DELETE") + res = req.get_response(self.app) + self.assertEqual(res.status_code, 204) + + def test_delete_invalid_format_gid(self): + url = "/v1/groups/" + "aaaaaaa" + "/keypairs/" + KEYPAIR_ID + req = get_request(url, "DELETE") + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_delete_invalid_format_keypair_id(self): + url = "/v1/groups/" + GID + "/keypairs/" + "aaaaa" + req = get_request(url, "DELETE") + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_delete_keypair_not_found(self): + db.process_get_all(IsA(context.RequestContext), + GID, + filters={"keypair_id": KEYPAIR_ID})\ + .AndReturn([]) + self.mox.StubOutWithMock(db, "keypair_delete") + db.keypair_delete(IsA(context.RequestContext), GID, KEYPAIR_ID)\ + .AndRaise(exception.KeypairNotFound(keypair_id=KEYPAIR_ID)) + self.mox.ReplayAll() + url = "/v1/groups/" + GID + "/keypairs/" + KEYPAIR_ID + req = get_request(url, "DELETE") + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_delete_raise_exception_by_scheduler_rpcapi(self): + db.process_get_all(IsA(context.RequestContext), + GID, + filters={"keypair_id": KEYPAIR_ID})\ + .AndReturn([]) + scheduler_rpcapi.SchedulerAPI.select_destinations( + IsA(context.RequestContext), request_spec={}, filter_properties={})\ + .AndRaise(Exception()) + self.mox.ReplayAll() + + url = '/v1/groups/' + GID + '/keypairs/' + KEYPAIR_ID + req = get_request(url, "DELETE") + res = req.get_response(self.app) + self.assertEqual(res.status_code, 500) + + def 
test_delete_raise_exception_by_operator_rpcapi(self): + db.process_get_all(IsA(context.RequestContext), + GID, + filters={"keypair_id": KEYPAIR_ID})\ + .AndReturn([]) + scheduler_rpcapi.SchedulerAPI.select_destinations( + IsA(context.RequestContext), request_spec={}, filter_properties={})\ + .AndReturn({"host": "fake_host"}) + operator_rpcapi.ResourceOperatorAPI.keypair_delete( + IsA(context.RequestContext), "fake_host", nova_keypair_id=IsA(str))\ + .AndRaise(Exception()) + self.mox.ReplayAll() + + url = '/v1/groups/' + GID + '/keypairs/' + KEYPAIR_ID + req = get_request(url, "DELETE") + res = req.get_response(self.app) + self.assertEqual(res.status_code, 500) + + def test_delete_raise_exception_keypair_inuse(self): + db.process_get_all(IsA(context.RequestContext), + GID, + filters={"keypair_id": KEYPAIR_ID})\ + .AndRaise(exception.keypairInUse(keypair_id=KEYPAIR_ID)) + self.mox.ReplayAll() + url = "/v1/groups/" + GID + "/keypairs/" + KEYPAIR_ID + req = get_request(url, "DELETE") + res = req.get_response(self.app) + self.assertEqual(res.status_code, 409) diff --git a/rack/tests/api/v1/test_networks.py b/rack/tests/api/v1/test_networks.py new file mode 100644 index 0000000..3376c0d --- /dev/null +++ b/rack/tests/api/v1/test_networks.py @@ -0,0 +1,762 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from rack import test, db, context, exception +from rack.openstack.common import jsonutils +from rack.resourceoperator import rpcapi as resourceoperator_rpcapi +from rack.scheduler import rpcapi as scheduler_rpcapi +from rack.tests.api import fakes +import uuid + +import webob + +import mox +from exceptions import Exception + + +GID = unicode(uuid.uuid4()) +NETWORK_ID1 = unicode(uuid.uuid4()) +NETWORK_ID2 = unicode(uuid.uuid4()) +RO_HOST_NAME = "host_resource_operator" +NEUTRON_NW_ID = "neutron_network_id" + + +def fake_create_db(context, values): + values["network_id"] = NETWORK_ID1 + return values + + +def fake_group_get_by_gid(context, gid): + return {"gid": gid, + "status": "ACTIVE" + } + + +def fake_select_destinations(context, request_spec, filter_properties): + return {"host": RO_HOST_NAME} + + +def fake_network_get_all(context, gid, filters=None): + return _return_base_network_list(context, gid, filters=None) + + +def fake_network_get_all_empty_list(context, gid): + return [] + + +def fake_raise_exception(): + raise Exception() + + +def _return_base_network_list(context, gid, filters=None): + return [ + { + "network_id": NETWORK_ID1, + "neutron_network_id": None, + "gid": gid, + "user_id": context.user_id, + "project_id": context.project_id, + "display_name": "net-45212048-abc3-43cc-89b3-377341426ac", + "is_admin": "True", + "subnet": "10.0.0.0/24", + "ext_router": "91212048-abc3-43cc-89b3-377341426aca", + "status": "BUILDING" + }, + { + "network_id": NETWORK_ID2, + "neutron_network_id": None, + "gid": gid, + "user_id": context.user_id, + "project_id": context.project_id, + "display_name": "net-13092048-abc3-43cc-89b3-377341426ac", + "is_admin": "True", + "subnet": "10.0.1.0/24", + "ext_router": "91212048-abc3-43cc-89b3-377341426aca", + "status": "BUILDING" + } + ] + + +def fake_network_get_by_network_id(context, gid, network_id): + network_dict = _return_base_network_list(context, gid)[0] + network_dict["processes"] = [] + return network_dict + + +def fake_network_delete(context, gid, network_id): + return { + "neutron_network_id": NEUTRON_NW_ID, + "ext_router": "fake_ext_router"} + + +def get_request(url, method, body=None): + req = webob.Request.blank(url) + req.headers['Content-Type'] = 'application/json' + req.method = method + if body is not None: + req.body = jsonutils.dumps(body) + return req + + +class FakeContext(context.RequestContext): + + def elevated(self): + """Return a consistent elevated context so we can detect it.""" + if not hasattr(self, '_elevated'): + self._elevated = super(FakeContext, self).elevated() + return self._elevated + + +class NetworksTest(test.NoDBTestCase): + + def setUp(self): + super(NetworksTest, self).setUp() + self.stubs.Set(db, "group_get_by_gid", fake_group_get_by_gid) + self.app = fakes.wsgi_app() + # fake context + self.user_id = 'fake' + self.project_id = 'fake' + self.context = FakeContext(self.user_id, self.project_id) + + # Tests for create ### + def test_create(self): + self.stubs.Set(db, "network_create", fake_create_db) + self.stubs.Set(db, "group_get_by_gid", fake_group_get_by_gid) + request_spec = {} + filter_properties = {} + self.mox.StubOutWithMock( + scheduler_rpcapi.SchedulerAPI, "select_destinations") + scheduler_rpcapi.SchedulerAPI.select_destinations( + mox.IsA(context.RequestContext), + request_spec, + filter_properties)\ + .AndReturn({"host": RO_HOST_NAME}) + + self.mox.StubOutWithMock( + resourceoperator_rpcapi.ResourceOperatorAPI, "network_create") + resourceoperator_rpcapi.ResourceOperatorAPI.network_create( + 
mox.IsA(context.RequestContext), + RO_HOST_NAME, + mox.IsA(dict)) + self.mox.ReplayAll() + + request_body = { + "network": { + "is_admin": "True", + "name": "network-test", + "cidr": "10.0.0.0/24", + "gateway": "10.0.0.254", + "dns_nameservers": ["8.8.8.8", "8.8.4.4"], + "ext_router_id": "91212048-abc3-43cc-89b3-377341426aca" + } + } + + expected_body = { + "network": { + "network_id": NETWORK_ID1, + "neutron_network_id": None, + "name": "network-test", + "gid": GID, + "user_id": "fake", + "project_id": "fake", + "is_admin": True, + "cidr": "10.0.0.0/24", + "ext_router_id": "91212048-abc3-43cc-89b3-377341426aca", + "status": "BUILDING" + } + } + url = '/v1/groups/' + GID + '/networks' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + body = jsonutils.loads(res.body) + for key in expected_body["network"]: + self.assertEqual( + body["network"][key], expected_body["network"][key]) + self.assertEqual(res.status_code, 202) + + def test_create_validate_exception_by_gid_notfound_format(self): + request_body = { + "network": { + "name": "test_network", + "is_admin": "True", + "cidr": "10.0.0.0/24", + "gateway": "10.0.0.254", + "dns_nameservers": ["8.8.8.8", "8.8.4.4"], + "ext_router_id": "91212048-abc3-43cc-89b3-377341426aca" + } + } + url = '/v1/groups/' + GID + "a" + '/networks' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_create_validate_exception_by_gid_notfound(self): + self.mox.StubOutWithMock( + db, "group_get_by_gid") + db.group_get_by_gid( + mox.IsA(context.RequestContext), + GID)\ + .AndRaise(exception.GroupNotFound(gid=GID)) + self.mox.ReplayAll() + request_body = { + "network": { + "name": "test_network", + "is_admin": "True", + "cidr": "10.0.0.0/24", + "gateway": "10.0.0.254", + "dns_nameservers": ["8.8.8.8", "8.8.4.4"], + "ext_router_id": "91212048-abc3-43cc-89b3-377341426aca" + } + } + url = '/v1/groups/' + GID + '/networks' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_create_validate_exception_no_body(self): + self.stubs.Set(db, "group_get_by_gid", fake_group_get_by_gid) + + url = '/v1/groups/' + GID + '/networks' + req = get_request(url, 'POST') + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_validate_exception_body_format(self): + self.stubs.Set(db, "group_get_by_gid", fake_group_get_by_gid) + + request_body = { + "name": "test_network", + "is_admin": "True", + "cidr": "10.0.0.0/24", + "gateway": "10.0.0.254", + "dns_nameservers": ["8.8.8.8", "8.8.4.4"], + "ext_router_id": "91212048-abc3-43cc-89b3-377341426aca" + } + url = '/v1/groups/' + GID + '/networks' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_validate_exception_by_cidr_required_none(self): + self.stubs.Set(db, "group_get_by_gid", fake_group_get_by_gid) + + request_body = { + "network": { + "name": "test_network", + "is_admin": "True", + "gateway": "10.0.0.254", + "dns_nameservers": ["8.8.8.8", "8.8.4.4"], + "ext_router_id": "91212048-abc3-43cc-89b3-377341426aca" + } + } + url = '/v1/groups/' + GID + '/networks' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_validate_exception_by_cidr_required_format(self): + self.stubs.Set(db, "group_get_by_gid", fake_group_get_by_gid) + + 
request_body = { + "network": { + "name": "test_network", + "is_admin": "True", + "cidr": "10.0.", + "gateway": "10.0.0.254", + "dns_nameservers": ["8.8.8.8", "8.8.4.4"], + "ext_router_id": "91212048-abc3-43cc-89b3-377341426aca" + } + } + url = '/v1/groups/' + GID + '/networks' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_by_required_item(self): + self.stubs.Set(db, "network_create", fake_create_db) + self.stubs.Set(db, "group_get_by_gid", fake_group_get_by_gid) + request_spec = {} + filter_properties = {} + self.mox.StubOutWithMock( + scheduler_rpcapi.SchedulerAPI, "select_destinations") + scheduler_rpcapi.SchedulerAPI.select_destinations( + mox.IsA(context.RequestContext), + request_spec, + filter_properties)\ + .AndReturn({"host": RO_HOST_NAME}) + + self.mox.StubOutWithMock( + resourceoperator_rpcapi.ResourceOperatorAPI, "network_create") + resourceoperator_rpcapi.ResourceOperatorAPI.network_create( + mox.IsA(context.RequestContext), + RO_HOST_NAME, + mox.IsA(dict)) + self.mox.ReplayAll() + + request_body = { + "network": { + "cidr": "10.0.0.0/24", + } + } + + url = '/v1/groups/' + GID + '/networks' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 202) + + def test_create_by_name_blank(self): + self.stubs.Set(db, "network_create", fake_create_db) + self.stubs.Set(db, "group_get_by_gid", fake_group_get_by_gid) + request_spec = {} + filter_properties = {} + self.mox.StubOutWithMock( + scheduler_rpcapi.SchedulerAPI, "select_destinations") + scheduler_rpcapi.SchedulerAPI.select_destinations( + mox.IsA(context.RequestContext), + request_spec, + filter_properties)\ + .AndReturn({"host": RO_HOST_NAME}) + + self.mox.StubOutWithMock( + resourceoperator_rpcapi.ResourceOperatorAPI, "network_create") + resourceoperator_rpcapi.ResourceOperatorAPI.network_create( + mox.IsA(context.RequestContext), + RO_HOST_NAME, + mox.IsA(dict)) + self.mox.ReplayAll() + + request_body = { + "network": { + "name":"", + "is_admin": "True", + "cidr": "10.0.0.0/24" + } + } + + url = '/v1/groups/' + GID + '/networks' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 202) + + + def test_create_validate_exception_by_name_max_length(self): + self.stubs.Set(db, "group_get_by_gid", fake_group_get_by_gid) + name_max_length = 255 + request_body = { + "network": { + "name": "a" * (name_max_length + 1), + "is_admin": "True", + "cidr": "10.0.0.0/24", + "gateway": "10.0.0.254", + "dns_nameservers": ["8.8.8.8", "8.8.4.4"], + "ext_router_id": "91212048-abc3-43cc-89b3-377341426aca" + } + } + url = '/v1/groups/' + GID + '/networks' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_validate_exception_by_is_admin_not_boolean(self): + self.stubs.Set(db, "group_get_by_gid", fake_group_get_by_gid) + + request_body = { + "network": { + "name": "test_network", + "cidr": "10.0.0.0/24", + "is_admin": "admin", + "gateway": "10.0.0.254", + "dns_nameservers": ["8.8.8.8", "8.8.4.4"], + "ext_router_id": "91212048-abc3-43cc-89b3-377341426aca" + } + } + + url = '/v1/groups/' + GID + '/networks' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_validate_exception_by_gateway_format(self): + self.stubs.Set(db, "group_get_by_gid", 
fake_group_get_by_gid) + + request_body = { + "network": { + "name": "test_network", + "cidr": "10.0.0.0/24", + "is_admin": "True", + "gateway": "adfad", + "dns_nameservers": ["8.8.8.8", "8.8.4.4"], + "ext_router_id": "91212048-abc3-43cc-89b3-377341426aca" + } + } + + url = '/v1/groups/' + GID + '/networks' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_validate_exception_by_dns_format(self): + self.stubs.Set(db, "group_get_by_gid", fake_group_get_by_gid) + + request_body = { + "network": { + "name": "test_network", + "cidr": "10.0.0.0/24", + "is_admin": "True", + "gateway": "10.0.0.254", + "dns_nameservers": ["8.8.8.8258", "8.8.4.4"], + "ext_router_id": "91212048-abc3-43cc-89b3-377341426aca" + } + } + + url = '/v1/groups/' + GID + '/networks' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_with_empty_string_in_dns_nameservers(self): + self.stubs.Set(db, "group_get_by_gid", fake_group_get_by_gid) + + request_body = { + "network": { + "name": "test_network", + "cidr": "10.0.0.0/24", + "is_admin": "True", + "gateway": "10.0.0.254", + "dns_nameservers": ["", ""], + "ext_router_id": "91212048-abc3-43cc-89b3-377341426aca" + } + } + + url = '/v1/groups/' + GID + '/networks' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_exception_scheduler_rpcapi(self): + self.stubs.Set(db, "network_create", fake_create_db) + self.stubs.Set(db, "group_get_by_gid", fake_group_get_by_gid) + request_spec = {} + filter_properties = {} + self.mox.StubOutWithMock( + scheduler_rpcapi.SchedulerAPI, "select_destinations") + scheduler_rpcapi.SchedulerAPI.select_destinations( + mox.IsA(context.RequestContext), + request_spec, + filter_properties)\ + .AndRaise(Exception) + + self.mox.StubOutWithMock(db, "network_update") + error_values = {"status": "ERROR"} + db.network_update(mox.IsA(context.RequestContext), + NETWORK_ID1, + error_values) + + self.mox.ReplayAll() + + request_body = { + "network": { + "is_admin": "True", + "name": "network-test", + "cidr": "10.0.0.0/24", + "gateway": "10.0.0.254", + "dns_nameservers": ["8.8.8.8", "8.8.4.4"], + "ext_router_id": "91212048-abc3-43cc-89b3-377341426aca" + } + } + url = '/v1/groups/' + GID + '/networks' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + + self.assertEqual(res.status_code, 500) + + def test_create_exception_resorceoperator_rpcapi(self): + self.stubs.Set(db, "network_create", fake_create_db) + self.stubs.Set(db, "group_get_by_gid", fake_group_get_by_gid) + self.stubs.Set(resourceoperator_rpcapi.ResourceOperatorAPI, + "network_create", + fake_raise_exception) + request_spec = {} + filter_properties = {} + self.mox.StubOutWithMock( + scheduler_rpcapi.SchedulerAPI, "select_destinations") + scheduler_rpcapi.SchedulerAPI.select_destinations( + mox.IsA(context.RequestContext), + request_spec, + filter_properties)\ + .AndReturn({"host": RO_HOST_NAME}) + + self.mox.StubOutWithMock(db, "network_update") + error_values = {"status": "ERROR"} + db.network_update(mox.IsA(context.RequestContext), + NETWORK_ID1, + error_values) + self.mox.ReplayAll() + + request_body = { + "network": { + "is_admin": "True", + "name": "network-test", + "cidr": "10.0.0.0/24", + "gateway": "10.0.0.254", + "dns_nameservers": ["8.8.8.8", "8.8.4.4"], + "ext_router_id": 
"91212048-abc3-43cc-89b3-377341426aca" + } + } + url = '/v1/groups/' + GID + '/networks' + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + + self.assertEqual(res.status_code, 500) + + # Tests for index ### + def test_index(self): + self.stubs.Set( + db, "network_get_all", fake_network_get_all) + + url = '/v1/groups/' + GID + '/networks' + req = get_request(url, 'GET') + res = req.get_response(self.app) + body = jsonutils.loads(res.body) + expected_body = {"networks": + [ + { + "network_id": NETWORK_ID1, + "neutron_network_id": None, + "gid": GID, + "user_id": "fake", + "project_id": "fake", + "name": "net-45212048-abc3-43cc-89b3-377341426ac", + "is_admin": "True", + "cidr": "10.0.0.0/24", + "ext_router_id": "91212048-abc3-43cc-89b3-377341426aca", + "status": "BUILDING" + }, + { + "network_id": NETWORK_ID2, + "neutron_network_id": None, + "gid": GID, + "user_id": "fake", + "project_id": "fake", + "name": "net-13092048-abc3-43cc-89b3-377341426ac", + "is_admin": "True", + "cidr": "10.0.1.0/24", + "ext_router_id": "91212048-abc3-43cc-89b3-377341426aca", + "status": "BUILDING" + } + ] + } + + self.assertEqual(body, expected_body) + self.assertEqual(res.status_code, 200) + + + def test_index_with_param(self): + self.stubs.Set( + db, "network_get_all", fake_network_get_all) + param = \ + "?network_id=" + NETWORK_ID1 + \ + "?neutron_network_id=" + NETWORK_ID1 + \ + "?status=" + NETWORK_ID1 + \ + "?is_admin=" + NETWORK_ID1 + \ + "?subnet=" + NETWORK_ID1 + \ + "?ext_router=" + NETWORK_ID1 + \ + "&name=test" + url = "/v1/groups/" + GID + "/networks" + param + req = get_request(url, 'GET') + res = req.get_response(self.app) + self.assertEqual(res.status_code, 200) + + def test_index_return_empty_list(self): + self.stubs.Set( + db, "network_get_all", fake_network_get_all_empty_list) + + url = '/v1/groups/' + GID + '/networks' + req = get_request(url, 'GET') + res = req.get_response(self.app) + body = jsonutils.loads(res.body) + expected_body = {"networks": []} + + self.assertEqual(body, expected_body) + self.assertEqual(res.status_code, 200) + + def test_index_validate_exception_by_gid_format(self): + not_uuid_gid = "aaaaa" + url = '/v1/groups/' + not_uuid_gid + '/networks' + req = get_request(url, 'GET') + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + # Tests for show ### + def test_show(self): + self.stubs.Set( + db, "network_get_by_network_id", fake_network_get_by_network_id) + + url = '/v1/groups/' + GID + '/networks' + req = get_request(url + "/" + NETWORK_ID1, 'GET') + res = req.get_response(self.app) + body = jsonutils.loads(res.body) + + expected_body = {"network": + { + "network_id": NETWORK_ID1, + "neutron_network_id": None, + "gid": GID, + "user_id": "fake", + "project_id": "fake", + "name": "net-45212048-abc3-43cc-89b3-377341426ac", + "is_admin": "True", + "cidr": "10.0.0.0/24", + "ext_router_id": "91212048-abc3-43cc-89b3-377341426aca", + "status": "BUILDING" + } + } + self.assertEqual(body, expected_body) + self.assertEqual(res.status_code, 200) + + def test_show_validate_exception_by_gid_format(self): + not_uuid_gid = "aaaaa" + url = '/v1/groups/' + not_uuid_gid + '/networks/' + NETWORK_ID1 + req = get_request(url, 'GET') + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_show_validate_exception_by_network_id_format(self): + not_uuid_network_id = "aaaaa" + url = '/v1/groups/' + GID + '/networks/' + not_uuid_network_id + req = get_request(url, 'GET') + res = req.get_response(self.app) + 
self.assertEqual(res.status_code, 404) + + def test_show_exception_networknotfound(self): + self.mox.StubOutWithMock( + db, "network_get_by_network_id") + db.network_get_by_network_id( + mox.IsA(context.RequestContext), + GID, + NETWORK_ID1)\ + .AndRaise(exception.NetworkNotFound(network_id=NETWORK_ID1)) + self.mox.ReplayAll() + + url = '/v1/groups/' + GID + '/networks' + req = get_request(url + "/" + NETWORK_ID1, 'GET') + res = req.get_response(self.app) + + self.assertEqual(res.status_code, 404) + + # Tests for delete ### + def test_delete(self): + self.stubs.Set( + db, "network_get_by_network_id", fake_network_get_by_network_id) + self.stubs.Set( + db, "network_delete", fake_network_delete) + request_spec = {} + filter_properties = {} + self.mox.StubOutWithMock( + scheduler_rpcapi.SchedulerAPI, "select_destinations") + scheduler_rpcapi.SchedulerAPI.select_destinations( + mox.IsA( + context.RequestContext), + request_spec, + filter_properties)\ + .AndReturn({"host": RO_HOST_NAME}) + + self.mox.StubOutWithMock( + resourceoperator_rpcapi.ResourceOperatorAPI, "network_delete") + resourceoperator_rpcapi.ResourceOperatorAPI.network_delete( + mox.IsA( + context.RequestContext), + RO_HOST_NAME, + neutron_network_id=NEUTRON_NW_ID, + ext_router="fake_ext_router") + self.mox.ReplayAll() + + url = '/v1/groups/' + GID + '/networks/' + NETWORK_ID1 + req = get_request(url, 'DELETE') + res = req.get_response(self.app) + + self.assertEqual(res.status_code, 204) + + def test_delete_validate_exception_by_gid_format(self): + not_uuid_gid = "aaaaa" + url = '/v1/groups/' + not_uuid_gid + '/networks/' + NETWORK_ID1 + req = get_request(url, 'DELETE') + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_delete_validate_exception_by_network_id_format(self): + not_uuid_network_id = "aaaaa" + url = '/v1/groups/' + GID + '/networks/' + not_uuid_network_id + req = get_request(url, 'DELETE') + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_delete_exception_scheduler_rpcapi(self): + self.stubs.Set(db, "network_get_by_network_id", fake_network_get_by_network_id) + self.stubs.Set(db, "network_delete", fake_network_delete) + self.stubs.Set(db, "group_get_by_gid", fake_group_get_by_gid) + request_spec = {} + filter_properties = {} + self.mox.StubOutWithMock( + scheduler_rpcapi.SchedulerAPI, "select_destinations") + scheduler_rpcapi.SchedulerAPI.select_destinations( + mox.IsA(context.RequestContext), + request_spec, + filter_properties)\ + .AndRaise(Exception) + + self.mox.ReplayAll() + + url = '/v1/groups/' + GID + '/networks/' + NETWORK_ID1 + req = get_request(url, 'DELETE') + res = req.get_response(self.app) + + self.assertEqual(res.status_code, 500) + + def test_delete_exception_resorceoperator_rpcapi(self): + self.stubs.Set(db, "network_get_by_network_id", fake_network_get_by_network_id) + self.stubs.Set(db, "network_delete", fake_network_delete) + self.stubs.Set(db, "group_get_by_gid", fake_group_get_by_gid) + self.stubs.Set(resourceoperator_rpcapi.ResourceOperatorAPI, "network_delete", fake_raise_exception) + request_spec = {} + filter_properties = {} + self.mox.StubOutWithMock( + scheduler_rpcapi.SchedulerAPI, "select_destinations") + scheduler_rpcapi.SchedulerAPI.select_destinations( + mox.IsA(context.RequestContext), + request_spec, + filter_properties)\ + .AndReturn({"host": RO_HOST_NAME}) + + self.mox.ReplayAll() + + url = '/v1/groups/' + GID + '/networks/' + NETWORK_ID1 + req = get_request(url, 'DELETE') + res = 
req.get_response(self.app) + + self.assertEqual(res.status_code, 500) + + def test_delete_exception_inuse(self): + self.mox.StubOutWithMock(db, "network_get_by_network_id") + network_process_inuse = {"processes":[{"pid":"pid"}]} + db.network_get_by_network_id(mox.IsA(context.RequestContext), + GID, + NETWORK_ID1)\ + .AndReturn(network_process_inuse) + self.mox.ReplayAll() + + url = '/v1/groups/' + GID + '/networks/' + NETWORK_ID1 + req = get_request(url, 'DELETE') + res = req.get_response(self.app) + + self.assertEqual(res.status_code, 409) + diff --git a/rack/tests/api/v1/test_processes.py b/rack/tests/api/v1/test_processes.py new file mode 100644 index 0000000..8ad19fd --- /dev/null +++ b/rack/tests/api/v1/test_processes.py @@ -0,0 +1,747 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from mox import IsA +from mox import IgnoreArg + +from rack.api.v1 import processes +from rack import context +from rack import db +from rack import exception +from rack.openstack.common import jsonutils +from rack import test +from rack.tests.api import fakes +from rack.api.v1.views.processes import ViewBuilder +from rack.resourceoperator import rpcapi as operator_rpcapi +from rack.scheduler import rpcapi as scheduler_rpcapi + +import uuid +import webob +from rack.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +GID = unicode(uuid.uuid4()) + +PPID1 = unicode(uuid.uuid4()) +PPID2 = unicode(uuid.uuid4()) + +PID1 = unicode(uuid.uuid4()) +PID2 = unicode(uuid.uuid4()) + +PIDX = unicode(uuid.uuid4()) + +KEYPAIR_ID1 = unicode(uuid.uuid4()) +KEYPAIR_ID2 = unicode(uuid.uuid4()) + +NOVA_KEYPAIR_ID1 = unicode(uuid.uuid4()) + +SECURITYGROUP_ID1 = unicode(uuid.uuid4()) +SECURITYGROUP_ID2 = unicode(uuid.uuid4()) +SECURITYGROUP_ID3 = unicode(uuid.uuid4()) + +NEUTRON_SECURITYGROUP_ID1 = unicode(uuid.uuid4()) +NEUTRON_SECURITYGROUP_ID2 = unicode(uuid.uuid4()) +NEUTRON_SECURITYGROUP_ID3 = unicode(uuid.uuid4()) + +NETWORK_ID1 = unicode(uuid.uuid4()) +NETWORK_ID2 = unicode(uuid.uuid4()) + +NEUTRON_NETWORK_ID1 = unicode(uuid.uuid4()) +NEUTRON_NETWORK_ID2 = unicode(uuid.uuid4()) + +GLANCE_IMAGE_ID1 = unicode(uuid.uuid4()) +GLANCE_IMAGE_ID2 = unicode(uuid.uuid4()) + +METADATA1 = {"type1":"test1","type2":"test2"} + +NOVA_INSTANCE_ID = unicode(uuid.uuid4()) + +def _base(context): + return { + "user_id": context.user_id, + "project_id": context.project_id + } + + +def _base_keypair(keypair_id, nova_keypair_id): + return { + "keypair_id": keypair_id, + "nova_keypair_id": nova_keypair_id + } + + +def _base_securitygroup(securitygroup_id, neutron_securitygroup_id): + return { + "securitygroup_id": securitygroup_id, + "neutron_securitygroup_id": neutron_securitygroup_id + } + + +def _base_securitygroups1(): + return [ + _base_securitygroup(SECURITYGROUP_ID1, NEUTRON_SECURITYGROUP_ID1), + _base_securitygroup(SECURITYGROUP_ID2, NEUTRON_SECURITYGROUP_ID2), + ] + + +def _base_securitygroups2(): + return [ + _base_securitygroup(SECURITYGROUP_ID3, 
NEUTRON_SECURITYGROUP_ID3), + ] + + +def _base_network(network_id, neutron_network_id): + return { + "network_id": network_id, + "neutron_network_id": neutron_network_id + } + + +def _base_networks(): + return [ + _base_network(NETWORK_ID1, NEUTRON_NETWORK_ID1), + _base_network(NETWORK_ID2, NEUTRON_NETWORK_ID2), + ] + + +def _base_process1(gid, pid): + return { + "pid":pid, + "ppid":PPID1, + "nova_instance_id":None, + "gid":gid, + "display_name":"test1", + "nova_flavor_id":1, + "glance_image_id":GLANCE_IMAGE_ID1, + "keypair_id":KEYPAIR_ID1, + "securitygroups":_base_securitygroups1(), + "networks":_base_networks(), + "status":"BUILDING" + } + + +def _base_process2(gid, pid): + return { + "pid":pid, + "ppid":PPID2, + "nova_instance_id":None, + "gid":gid, + "display_name":"test2", + "nova_flavor_id":2, + "glance_image_id":GLANCE_IMAGE_ID2, + "keypair_id":KEYPAIR_ID2, + "securitygroups":_base_securitygroups2(), + "networks":_base_networks(), + "status":"BUILDING" + } + + +def _base_processes(gid): + return [ + _base_process1(gid, PPID1), + _base_process2(gid, PPID2), + _base_process1(gid, PID1), + _base_process2(gid, PID2), + ] + + +def fake_keypair_get_by_keypair_id(context, gid, keypair_id): + return _base_keypair(keypair_id, NOVA_KEYPAIR_ID1) + + +def fake_keypair_get_by_keypair_id_raise_not_found(context, gid, keypair_id): + raise exception.KeypairNotFound(keypair_id=keypair_id) + + +def fake_network_get_all(context, gid, filters=None): + return _base_networks() + + +def fake_network_get_all_not_found(context, gid, filters=None): + return [] + + +def fake_process_get_all(context, gid, filters=None): + processes = _base_processes(gid) + for process in processes: + process.update(_base(context)) + return processes + + +def fake_process_get_by_pid(context, gid, pid): + processes = _base_processes(gid) + for process in processes: + if process["pid"] == pid: + process.update(_base(context)) + return process + raise exception.ProcessNotFound(pid=pid) + + +def fake_pid1(): + return PID1 + + +def fake_create(context, kwargs, network_ids, securitygroup_ids): + process = _base(context) + process.update(kwargs) + process["networks"] = fake_network_get_all(context, GID) + process["securitygroups"] = _base_securitygroups1() + return process + + +def fake_delete(context, gid, pid): + process = _base(context) + process.update(gid=gid) + process.update(pid=pid) + process.update(nova_instance_id=NOVA_INSTANCE_ID) + return process + + +def get_request(url, method, body=None): + req = webob.Request.blank(url) + req.headers['Content-Type'] = 'application/json' + req.method = method + if body is not None: + req.body = jsonutils.dumps(body) + return req + + +def get_base_url(gid): + return "/v1/groups/" + gid + "/processes" + + +def get_base_body(process): + return { + "ppid": process["ppid"], + "name": process["display_name"], + "nova_flavor_id": process["nova_flavor_id"], + "glance_image_id": process["glance_image_id"], + "keypair_id": process["keypair_id"], + "securitygroup_ids": [securitygroup["securitygroup_id"] + for securitygroup in process["securitygroups"]], + "metadata" : METADATA1 + } + + +def get_base_request_body1(process): + return {"process": get_base_body(process)} + + +def get_base_process_body(process): + process_body = get_base_body(process) + process_body.update(gid=GID) + process_body.update(pid=process["pid"]) + process_body.update(status=process["status"]) + process_body.update(user_id="fake") + process_body.update(project_id="fake") + 
process_body.update(network_ids=[NETWORK_ID1,NETWORK_ID2]) + process_body.pop("metadata") + return process_body + + +def get_base_process_response_body(process): + process_body = get_base_process_body(process) + return {"process": process_body} + + +def get_base_processes_response_body(processes): + processes_body = [] + for process in processes: + process_body = get_base_process_body(process) + processes_body.append(process_body) + return {"processes": processes_body} + + +class ProcessesTest(test.NoDBTestCase): + + def _set_mox_db_process_update_on_error(self): + self.mox.StubOutWithMock(db, "process_update") + db.process_update(IsA(context.RequestContext), IsA(unicode), IsA(unicode), {"status": "ERROR"}) + + def _set_mox_scheduler_select_destinations(self, return_value={"host": "fake_host"}, do_process_update=True): + self.mox.StubOutWithMock(scheduler_rpcapi.SchedulerAPI, "select_destinations") + method = scheduler_rpcapi.SchedulerAPI.select_destinations( + IsA(context.RequestContext), + request_spec={}, + filter_properties={}) + if issubclass(return_value.__class__, Exception): + method.AndRaise(return_value) + if do_process_update: + self._set_mox_db_process_update_on_error() + else: + method.AndReturn(return_value) + + def _set_mox_resource_operator_process_create(self, exception=None): + self._set_mox_scheduler_select_destinations() + self.mox.StubOutWithMock(operator_rpcapi.ResourceOperatorAPI, "process_create") + method = operator_rpcapi.ResourceOperatorAPI.process_create(IsA(context.RequestContext), "fake_host", + pid=IsA(unicode), + ppid=IsA(unicode), + gid=IsA(unicode), + name=IsA(unicode), + glance_image_id=IsA(unicode), + nova_flavor_id=IsA(int), + nova_keypair_id=IgnoreArg(), + neutron_securitygroup_ids=IsA(list), + neutron_network_ids=IsA(list), + metadata=IsA(dict), + ) + if issubclass(exception.__class__, Exception): + method.AndRaise(exception) + self._set_mox_db_process_update_on_error() + + def _set_mox_resource_operator_process_delete(self, exception=None): + self._set_mox_scheduler_select_destinations() + self.mox.StubOutWithMock(operator_rpcapi.ResourceOperatorAPI, "process_delete") + method = operator_rpcapi.ResourceOperatorAPI.process_delete(IsA(context.RequestContext), "fake_host", + nova_instance_id=IsA(unicode), + ) + if issubclass(exception.__class__, Exception): + method.AndRaise(exception) + + def setUp(self): + super(ProcessesTest, self).setUp() + self.stubs.Set(uuid, "uuid4", fake_pid1) + self.stubs.Set(db, "keypair_get_by_keypair_id", fake_keypair_get_by_keypair_id) + self.stubs.Set(db, "network_get_all", fake_network_get_all) + self.stubs.Set(db, "process_get_all", fake_process_get_all) + self.stubs.Set(db, "process_get_by_pid", fake_process_get_by_pid) + self.stubs.Set(db, "process_create", fake_create) + self.stubs.Set(db, "process_delete", fake_delete) + self.app = fakes.wsgi_app() + self.view = ViewBuilder() + + def test_index(self): + processes = _base_processes(GID) + expect = get_base_processes_response_body(processes) + + url = get_base_url(GID) + req = get_request(url, 'GET') + res = req.get_response(self.app) + body = jsonutils.loads(res.body) + self.assertEqual(res.status_code, 200) + self.assertEqual(body, expect) + + def test_index_with_param(self): + param = \ + "?pid=" + PID1 + \ + "?ppid=" + PID1 + \ + "?status=" + PID1 + \ + "?glance_image_id=" + PID1 + \ + "?nova_flavor_id=" + PID1 + \ + "?securitygroup_id=" + PID1 + \ + "?keypair_id=" + PID1 + \ + "?network_id=" + PID1 + \ + "&name=test" + url = get_base_url(GID) + param + req = 
get_request(url, 'GET') + res = req.get_response(self.app) + self.assertEqual(res.status_code, 200) + + def test_index_invalid_format_gid(self): + url = get_base_url("aaaaa") + req = get_request(url, 'GET') + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_show(self): + process = _base_process1(GID, PID1) + expect = get_base_process_response_body(process) + + url = get_base_url(GID) + "/" + PID1 + req = get_request(url, 'GET') + res = req.get_response(self.app) + body = jsonutils.loads(res.body) + self.assertEqual(res.status_code, 200) + self.assertEqual(body, expect) + + def test_show_invalid_format_gid(self): + url = get_base_url("aaaaa") + "/" + PID1 + req = get_request(url, 'GET') + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_show_invalid_format_pid(self): + url = get_base_url(GID) + "/" + "aaaaa" + req = get_request(url, 'GET') + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_show_process_not_found(self): + self.stubs.Set(db, "keypair_get_by_keypair_id", fake_keypair_get_by_keypair_id_raise_not_found) + url = get_base_url(GID) + "/" + PIDX + req = get_request(url, 'GET') + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_create(self): + self._set_mox_resource_operator_process_create() + self.mox.ReplayAll() + + process = _base_process1(GID, PID1) + request_body = get_base_request_body1(process) + expect = get_base_process_response_body(process) + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + body = jsonutils.loads(res.body) + self.assertEqual(res.status_code, 202) + for key in body["process"]: + self.assertEqual(body["process"][key], expect["process"][key]) + + def test_create_raise_exception_by_scheduler_rpcapi(self): + self._set_mox_scheduler_select_destinations(Exception()) + self.mox.ReplayAll() + + process = _base_process1(GID, PID1) + request_body = get_base_request_body1(process) + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 500) + + def test_create_raise_exception_by_operator_rpcapi(self): + self._set_mox_resource_operator_process_create(Exception()) + self.mox.ReplayAll() + + process = _base_process1(GID, PID1) + request_body = get_base_request_body1(process) + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 500) + + def test_create_invalid_format_gid(self): + gid = "aaaaaaaaa" + process = _base_process1(gid, PID1) + request_body = get_base_request_body1(process) + + url = get_base_url(gid) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_create_invalid_format_keypair_id(self): + process = _base_process1(GID, PID1) + request_body = get_base_request_body1(process) + + request_body["process"].update(keypair_id="aaaaaaaaa") + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_create_without_keypair_id(self): + self._set_mox_resource_operator_process_create() + self.mox.ReplayAll() + + process = _base_process1(GID, PID1) + request_body = get_base_request_body1(process) + expect = get_base_process_response_body(process) + + request_body["process"].pop("keypair_id") + 
expect["process"]["keypair_id"] = KEYPAIR_ID1 + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + body = jsonutils.loads(res.body) + self.assertEqual(res.status_code, 202) + for key in body["process"]: + self.assertEqual(body["process"][key], expect["process"][key]) + + def test_create_invalid_format_ppid(self): + process = _base_process1(GID, PID1) + request_body = get_base_request_body1(process) + + request_body["process"].update(ppid="aaaaaaaaa") + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_create_without_ppid(self): + self._set_mox_resource_operator_process_create() + self.mox.ReplayAll() + + process = _base_process1(GID, PID1) + request_body = get_base_request_body1(process) + expect = get_base_process_response_body(process) + + request_body["process"].pop("ppid") + expect["process"]["ppid"] = None + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + body = jsonutils.loads(res.body) + self.assertEqual(res.status_code, 202) + for key in body["process"]: + self.assertEqual(body["process"][key], expect["process"][key]) + + def test_create_process_name_is_whitespace(self): + process = _base_process1(GID, PID1) + request_body = get_base_request_body1(process) + + request_body["process"].update(name=" ") + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_process_name_with_leading_trailing_whitespace(self): + self._set_mox_resource_operator_process_create() + self.mox.ReplayAll() + + process = _base_process1(GID, PID1) + request_body = get_base_request_body1(process) + expect = get_base_process_response_body(process) + + request_body["process"]["name"] = " test " + expect["process"]["name"] = "test" + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + body = jsonutils.loads(res.body) + self.assertEqual(res.status_code, 202) + for key in body["process"]: + self.assertEqual(body["process"][key], expect["process"][key]) + + def test_create_check_process_name_length(self): + process = _base_process1(GID, PID1) + request_body = get_base_request_body1(process) + + MAX_LENGTH = 255 + request_body["process"].update(name="a" * (MAX_LENGTH + 1)) + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_check_process_name_invalid_type(self): + process = _base_process1(GID, PID1) + request_body = get_base_request_body1(process) + + request_body["process"].update(name=11111111) + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_without_process_name(self): + self._set_mox_resource_operator_process_create() + self.mox.ReplayAll() + + process = _base_process1(GID, PID1) + request_body = get_base_request_body1(process) + expect = get_base_process_response_body(process) + + request_body["process"].pop("name") + expect["process"]["name"] = "pro-" + PID1 + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + body = jsonutils.loads(res.body) + self.assertEqual(res.status_code, 202) + for key in body["process"]: + 
self.assertEqual(body["process"][key], expect["process"][key]) + + def test_create_invalid_format_glance_image_id(self): + process = _base_process1(GID, PID1) + request_body = get_base_request_body1(process) + + request_body["process"].update(glance_image_id="aaaaaaaaa") + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_not_int_nova_flavor_id(self): + process = _base_process1(GID, PID1) + request_body = get_base_request_body1(process) + + request_body["process"].update(ppid=None) + request_body["process"].update(nova_flavor_id=None) + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_not_list_securitygroup_ids(self): + process = _base_process1(GID, PID1) + request_body = get_base_request_body1(process) + + request_body["process"].update(securitygroup_ids=unicode(uuid.uuid4())) + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_invalid_format_in_list_securitygroup_ids(self): + process = _base_process1(GID, PID1) + request_body = get_base_request_body1(process) + + request_body["process"].update(securitygroup_ids=[unicode(uuid.uuid4()),"aaaaaaa"]) + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_create_blank_list_securitygroup_ids(self): + process = _base_process1(GID, PID1) + request_body = get_base_request_body1(process) + + request_body["process"].update(ppid=None) + request_body["process"].update(securitygroup_ids=[]) + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_not_dict_metadata(self): + process = _base_process1(GID, PID1) + request_body = get_base_request_body1(process) + + request_body["process"].update(metadata="aaaaaaa") + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_without_metadata(self): + self._set_mox_resource_operator_process_create() + self.mox.ReplayAll() + + process = _base_process1(GID, PID1) + request_body = get_base_request_body1(process) + expect = get_base_process_response_body(process) + + request_body["process"].pop("metadata") + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + body = jsonutils.loads(res.body) + self.assertEqual(res.status_code, 202) + for key in body["process"]: + self.assertEqual(body["process"][key], expect["process"][key]) + + def test_create_empty_body(self): + request_body = {"process": {}} + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_no_body(self): + request_body = {} + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_invalid_format_body(self): + request_body = [] + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def 
test_create_notfound_networks(self): + self.stubs.Set(db, "network_get_all", fake_network_get_all_not_found) + + process = _base_process1(GID, PID1) + request_body = get_base_request_body1(process) + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_create_notfound_ppid(self): + process = _base_process1(GID, PID1) + request_body = get_base_request_body1(process) + + request_body["process"].update(ppid=PIDX) + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_create_notfound_keypair_id(self): + self.stubs.Set(db, "keypair_get_by_keypair_id", fake_keypair_get_by_keypair_id_raise_not_found) + + process = _base_process1(GID, PID1) + request_body = get_base_request_body1(process) + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_create_without_secgroup_keypair_id_glance_image_flavor_id(self): + self._set_mox_resource_operator_process_create() + self.mox.ReplayAll() + + process = _base_process1(GID, PID1) + request_body = get_base_request_body1(process) + request_body["process"].update(securitygroup_ids=None) + request_body["process"].update(keypair_id=None) + request_body["process"].update(glance_image_id=None) + request_body["process"].update(nova_flavor_id=None) + + expect = get_base_process_response_body(process) + + request_body["process"].pop("metadata") + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + body = jsonutils.loads(res.body) + self.assertEqual(res.status_code, 202) + for key in body["process"]: + self.assertEqual(body["process"][key], expect["process"][key]) + + def test_delete_invalid_format_gid(self): + url = get_base_url("aaaaaaa") + "/" + PID1 + req = get_request(url, "DELETE") + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_delete_invalid_format_pid(self): + url = get_base_url(GID) + "/" + "aaaaa" + req = get_request(url, "DELETE") + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) diff --git a/rack/tests/api/v1/test_securitygroups.py b/rack/tests/api/v1/test_securitygroups.py new file mode 100644 index 0000000..ab0bdac --- /dev/null +++ b/rack/tests/api/v1/test_securitygroups.py @@ -0,0 +1,982 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from mox import IsA + +from rack.api.v1 import securitygroups +from rack import context +from rack import db +from rack import exception +from rack.openstack.common import jsonutils +from rack.resourceoperator import rpcapi as operator_rpcapi +from rack.scheduler import rpcapi as scheduler_rpcapi +from rack import test +from rack.tests.api import fakes + +import uuid +import webob + +GID = unicode(uuid.uuid4()) +SECURITYGROUP_ID = unicode(uuid.uuid4()) + +SECURITYGROUP_ID1 = unicode(uuid.uuid4()) +SECURITYGROUP_ID2 = unicode(uuid.uuid4()) + +def _base_securitygroup_get_response(context): + return [ + { + "securitygroup_id": SECURITYGROUP_ID1, + "neutron_securitygroup_id": "fake_key1", + "gid": GID, + "user_id": context.user_id, + "project_id": context.project_id, + "display_name": "fake_key1", + "is_default": False, + "status": "ACTIVE" + }, + { + "securitygroup_id": SECURITYGROUP_ID2, + "neutron_securitygroup_id": "fake_key2", + "gid": GID, + "user_id": context.user_id, + "project_id": context.project_id, + "display_name": "fake_key2", + "is_default": False, + "status": "ACTIVE" + }, + ] + + +def fake_group_get_by_id(context, gid): + pass + + +def fake_securitygroup_get_all(context, gid, filters=None): + return _base_securitygroup_get_response(context) + + +def fake_securitygroup_get_by_securitygroup_id(context, gid, securitygroup_id): + securitygroup_list = _base_securitygroup_get_response(context) + for securitygroup in securitygroup_list: + if securitygroup["securitygroup_id"] == securitygroup_id: + return securitygroup + raise exception.SecuritygroupNotFound(securitygroup_id=securitygroup_id) + + +def fake_create(context, kwargs): + return { + "securitygroup_id": SECURITYGROUP_ID, + "neutron_securitygroup_id": kwargs.get("neutron_securitygroup_id"), + "gid": GID, + "user_id": context.user_id, + "project_id": context.project_id, + "display_name": kwargs.get("display_name"), + "is_default": kwargs.get("is_default"), + "status": "BUILDING" + } + + +def fake_update(context, gid, securitygroup_id, kwargs): + return { + "securitygroup_id": securitygroup_id, + "neutron_securitygroup_id": "test_securitygroup", + "gid": GID, + "user_id": context.user_id, + "project_id": context.project_id, + "display_name": "test_securitygroup", + "is_default": kwargs.get("is_default"), + "status": "ACTIVE" + } + + +def fake_delete(context, gid, securitygroup_id): + return { + "securitygroup_id": securitygroup_id, + "neutron_securitygroup_id": "test_securitygroup", + "gid": GID, + "user_id": context.user_id, + "project_id": context.project_id, + "display_name": "test_securitygrouppair", + "is_default": False, + "status": "DELETING" + } + + +def fake_neutron_securitygroup_id(context, gid, securitygroup_id): + return {"neutron_securitygroup_id":"fake_id"} + + +def get_request(url, method, body=None): + req = webob.Request.blank(url) + req.headers['Content-Type'] = 'application/json' + req.method = method + if body is not None: + req.body = jsonutils.dumps(body) + return req + +def get_base_url(gid): + return "/v1/groups/" + gid + "/securitygroups" + +class SecuritygroupsTest(test.NoDBTestCase): + + def setUp(self): + super(SecuritygroupsTest, self).setUp() + self.stubs.Set(db, "group_get_by_gid", fake_group_get_by_id) + self.stubs.Set(db, "securitygroup_get_all", fake_securitygroup_get_all) + self.stubs.Set(db, "securitygroup_get_by_securitygroup_id", fake_securitygroup_get_by_securitygroup_id) + self.stubs.Set(db, "securitygroup_create", fake_create) + self.stubs.Set(db, "securitygroup_update", fake_update) + 
self.stubs.Set(db, "securitygroup_delete", fake_delete) + self.mox.StubOutWithMock(scheduler_rpcapi.SchedulerAPI, "select_destinations") + self.mox.StubOutWithMock(operator_rpcapi.ResourceOperatorAPI, "securitygroup_create") + self.mox.StubOutWithMock(operator_rpcapi.ResourceOperatorAPI, "securitygroup_delete") + self.app = fakes.wsgi_app() + + def test_index(self): + url = get_base_url(GID) + req = get_request(url, 'GET') + res = req.get_response(self.app) + body = jsonutils.loads(res.body) + expected = [ + { + "securitygroup_id": SECURITYGROUP_ID1, + "neutron_securitygroup_id": "fake_key1", + "gid": GID, + "user_id": "fake", + "project_id": "fake", + "name": "fake_key1", + "is_default": False, + "status": "ACTIVE" + }, + { + "securitygroup_id": SECURITYGROUP_ID2, + "neutron_securitygroup_id": "fake_key2", + "gid": GID, + "user_id": "fake", + "project_id": "fake", + "name": "fake_key2", + "is_default": False, + "status": "ACTIVE" + }, + ] + self.assertEqual(res.status_code, 200) + self.assertEqual(body["securitygroups"], expected) + + def test_index_with_param(self): + param = \ + "?securitygroup_id=df1c7053-ddd8-49d8-bd27-913f37f08238" + \ + "&name=sec-df1c7053-ddd8-49d8-bd27-913f37f08238" + \ + "&is_default=t&status=ACTIVE" + url = get_base_url(GID) + param + req = get_request(url, 'GET') + res = req.get_response(self.app) + self.assertEqual(res.status_code, 200) + + def test_index_invalid_format_gid(self): + url = get_base_url("aaaaa") + req = get_request(url, 'GET') + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + + def test_show(self): + url = get_base_url(GID) + "/" + SECURITYGROUP_ID1 + req = get_request(url, 'GET') + res = req.get_response(self.app) + body = jsonutils.loads(res.body) + expected = { + "securitygroup_id": SECURITYGROUP_ID1, + "neutron_securitygroup_id": "fake_key1", + "gid": GID, + "user_id": "fake", + "project_id": "fake", + "name": "fake_key1", + "is_default": False, + "status": "ACTIVE" + } + self.assertEqual(res.status_code, 200) + self.assertEqual(body["securitygroup"], expected) + + def test_show_invalid_format_gid(self): + url = get_base_url("aaaaa") + "/" + SECURITYGROUP_ID1 + req = get_request(url, 'GET') + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_show_invalid_format_securitygroup_id(self): + url = get_base_url(GID) + "/" + "aaaaa" + req = get_request(url, 'GET') + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_show_securitygroup_not_found(self): + url = get_base_url(GID) + "/" + SECURITYGROUP_ID + req = get_request(url, 'GET') + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_create(self): + name = "test_securitygroup" + request_body = { + "securitygroup": { + "name": name, + "is_default": "true", + } + } + + scheduler_rpcapi.SchedulerAPI.select_destinations( + IsA(context.RequestContext), request_spec={}, filter_properties={})\ + .AndReturn({"host": "fake_host"}) + operator_rpcapi.ResourceOperatorAPI.securitygroup_create( + IsA(context.RequestContext), "fake_host", gid=GID, securitygroup_id=IsA(unicode), name=name, + securitygrouprules=[]) + self.mox.ReplayAll() + + expected = { + "securitygroup": { + "securitygroup_id": SECURITYGROUP_ID, + "gid": GID, + "user_id": "fake", + "project_id": "fake", + "name": name, + "is_default": True, + "status": "BUILDING" + } + } + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + body = 
jsonutils.loads(res.body) + self.assertEqual(res.status_code, 202) + for key in expected["securitygroup"]: + self.assertEqual(body["securitygroup"][key], expected["securitygroup"][key]) + + def test_create_raise_exception_by_scheduler_rpcapi(self): + self.mox.StubOutWithMock(db, "securitygroup_update") + db.securitygroup_update(IsA(context.RequestContext), IsA(unicode), IsA(unicode), + IsA(dict)) + scheduler_rpcapi.SchedulerAPI.select_destinations( + IsA(context.RequestContext), request_spec={}, filter_properties={})\ + .AndRaise(Exception()) + self.mox.ReplayAll() + + request_body = { + "securitygroup": { + "name": "test_securitygroup", + } + } + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 500) + + + def test_create_raise_exception_by_operator_rpcapi(self): + name = "test_securitygroup" + request_body = { + "securitygroup": { + "name": name, + } + } + + self.mox.StubOutWithMock(db, "securitygroup_update") + db.securitygroup_update(IsA(context.RequestContext), GID, IsA(unicode), {"status": "ERROR"}) + scheduler_rpcapi.SchedulerAPI.select_destinations( + IsA(context.RequestContext), request_spec={}, filter_properties={})\ + .AndReturn({"host": "fake_host"}) + operator_rpcapi.ResourceOperatorAPI.securitygroup_create( + IsA(context.RequestContext), "fake_host", gid=GID, securitygroup_id=IsA(unicode), name=name, + securitygrouprules=[])\ + .AndRaise(Exception()) + self.mox.ReplayAll() + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 500) + + def test_create_invalid_format_gid(self): + request_body = { + "securitygroup": { + "name": "test_securitygroup", + } + } + + url = get_base_url('aaaaaaaa') + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_create_securitygroup_name_is_whitespace(self): + request_body = { + "securitygroup": { + "name": " ", + } + } + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_securitygroup_name_with_leading_trailing_whitespace(self): + request_body = { + "securitygroup": { + "name": " test_securitygroup ", + } + } + + scheduler_rpcapi.SchedulerAPI.select_destinations( + IsA(context.RequestContext), request_spec={}, filter_properties={})\ + .AndReturn({"host": "fake_host"}) + operator_rpcapi.ResourceOperatorAPI.securitygroup_create( + IsA(context.RequestContext), "fake_host", gid=GID, securitygroup_id=IsA(unicode), name="test_securitygroup", + securitygrouprules=[]) + self.mox.ReplayAll() + + expected = { + "securitygroup": { + "securitygroup_id": SECURITYGROUP_ID, + "gid": GID, + "user_id": "fake", + "project_id": "fake", + "name": "test_securitygroup", + "is_default": False, + "status": "BUILDING" + } + } + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + body = jsonutils.loads(res.body) + self.assertEqual(res.status_code, 202) + for key in expected["securitygroup"]: + self.assertEqual(body["securitygroup"][key], expected["securitygroup"][key]) + + def test_create_without_securitygroup_name(self): + request_body = { + "securitygroup": { + "is_default": "true", + } + } + + scheduler_rpcapi.SchedulerAPI.select_destinations( + IsA(context.RequestContext), request_spec={}, filter_properties={})\ + 
.AndReturn({"host": "fake_host"}) + operator_rpcapi.ResourceOperatorAPI.securitygroup_create( + IsA(context.RequestContext), "fake_host", gid=GID, securitygroup_id=IsA(unicode), name=IsA(unicode), + securitygrouprules=[]) + self.mox.ReplayAll() + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 202) + + def test_create_without_is_default(self): + name = "test_securitygroup" + request_body = { + "securitygroup": { + "name": name, + } + } + + scheduler_rpcapi.SchedulerAPI.select_destinations( + IsA(context.RequestContext), request_spec={}, filter_properties={})\ + .AndReturn({"host": "fake_host"}) + operator_rpcapi.ResourceOperatorAPI.securitygroup_create( + IsA(context.RequestContext), "fake_host", gid=GID, securitygroup_id=IsA(unicode), name=name, + securitygrouprules=[]) + self.mox.ReplayAll() + + expected = { + "securitygroup": { + "securitygroup_id": SECURITYGROUP_ID, + "gid": GID, + "user_id": "fake", + "project_id": "fake", + "name": "test_securitygroup", + "is_default": False, + "status": "BUILDING" + } + } + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + body = jsonutils.loads(res.body) + self.assertEqual(res.status_code, 202) + for key in expected["securitygroup"]: + self.assertEqual(body["securitygroup"][key], expected["securitygroup"][key]) + + def test_create_empty_body(self): + scheduler_rpcapi.SchedulerAPI.select_destinations( + IsA(context.RequestContext), request_spec={}, filter_properties={})\ + .AndReturn({"host": "fake_host"}) + operator_rpcapi.ResourceOperatorAPI.securitygroup_create( + IsA(context.RequestContext), "fake_host", gid=GID, securitygroup_id=IsA(unicode), name=IsA(unicode), + securitygrouprules=[]) + self.mox.ReplayAll() + + request_body = {"securitygroup": {}} + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 202) + + def test_create_no_body(self): + request_body = {} + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_invalid_format_body(self): + request_body = [] + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_check_securitygroup_name_length(self): + MAX_LENGTH = 255 + request_body = { + "securitygroup": { + "name": "a" * (MAX_LENGTH + 1), + } + } + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_invalid_is_default(self): + request_body = { + "securitygroup": { + "name": "test_securitygroup", + "is_default": "aaa" + } + } + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + + def test_create_with_rules(self): + remote_securitygroup_id = "b755595b-3bdf-4152-8fb0-456d5e72eb01" + name = "test_securitygroup" + request_body = { + "securitygroup": { + "name": name, + "is_default": "true", + "securitygrouprules": + [ + { + "protocol": "icmp", + "remote_securitygroup_id": remote_securitygroup_id + }, + { + "port_range_max": "80", + "port_range_min": "80", + "protocol": "tcp", + "remote_securitygroup_id": remote_securitygroup_id + }, + {"protocol": 
"icmp","remote_ip_prefix": "192.168.0.0/16"}, + {"port_range_max": "80","port_range_min": "80","protocol": "tcp","remote_ip_prefix": "192.168.0.0/16"}, + {"port_range_max": "5000","port_range_min": "5000","protocol": "udp","remote_ip_prefix": "192.168.0.0/16"}, + ] + + } + } + + self.stubs.Set(db, "securitygroup_get_by_securitygroup_id", fake_neutron_securitygroup_id) + scheduler_rpcapi.SchedulerAPI.select_destinations( + IsA(context.RequestContext), request_spec={}, filter_properties={})\ + .AndReturn({"host": "fake_host"}) + operator_rpcapi.ResourceOperatorAPI.securitygroup_create( + IsA(context.RequestContext), "fake_host", gid=GID, securitygroup_id=IsA(unicode), name=IsA(unicode), + securitygrouprules=[ + {"protocol": "icmp", "port_range_max": None, "port_range_min": None, + "remote_securitygroup_id": remote_securitygroup_id, + "remote_neutron_securitygroup_id": "fake_id", + "remote_ip_prefix": None}, + {"protocol": "tcp", "port_range_max": "80", "port_range_min": "80", + "remote_securitygroup_id": remote_securitygroup_id, + "remote_neutron_securitygroup_id": "fake_id", + "remote_ip_prefix": None}, + {"protocol": "icmp", "port_range_max": None, "port_range_min": None, + "remote_securitygroup_id": None, + "remote_ip_prefix": "192.168.0.0/16"}, + {"protocol": "tcp", "port_range_max": "80", "port_range_min": "80", + "remote_securitygroup_id": None, + "remote_ip_prefix": "192.168.0.0/16"}, + {"protocol": "udp", "port_range_max": "5000", "port_range_min": "5000", + "remote_securitygroup_id": None, + "remote_ip_prefix": "192.168.0.0/16"} + ]) + self.mox.ReplayAll() + + expected = { + "securitygroup": { + "securitygroup_id": SECURITYGROUP_ID, + "gid": GID, + "user_id": "fake", + "project_id": "fake", + "name": name, + "is_default": True, + "status": "BUILDING", + } + } + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + body = jsonutils.loads(res.body) + self.assertEqual(res.status_code, 202) + for key in expected["securitygroup"]: + self.assertEqual(body["securitygroup"][key], expected["securitygroup"][key]) + + def test_create_with_rules_not_protocol(self): + remote_securitygroup_id = "b755595b-3bdf-4152-8fb0-456d5e72eb01" + request_body = { + "securitygroup": { + "securitygrouprules": + [ + { + "remote_securitygroup_id": remote_securitygroup_id + }, + ] + } + } + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_with_rules_invalid_protocol(self): + remote_securitygroup_id = "b755595b-3bdf-4152-8fb0-456d5e72eb01" + request_body = { + "securitygroup": { + "securitygrouprules": + [ + { + "protocol": "icmpp", + "remote_securitygroup_id": remote_securitygroup_id + }, + ] + } + } + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_with_rules_invalid_remote_securitygroup_id(self): + remote_securitygroup_id = "b755595b-3bdf-4152-8fb0-456d5e72eb01" + request_body = { + "securitygroup": { + "securitygrouprules": + [ + { + "protocol": "icmp", + "remote_securitygroup_id": remote_securitygroup_id + "error" + }, + ] + } + } + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_create_with_rules_invalid_remote_ip_prefix(self): + request_body = { + "securitygroup": { + "securitygrouprules": + [ + { + 
"protocol": "icmp", + "remote_ip_prefix": "error" + }, + ] + } + } + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_with_rules_no_remote_securitygroup_id_and_no_remote_ip_prefix(self): + request_body = { + "securitygroup": { + "securitygrouprules": + [ + { + "protocol": "icmp", + }, + ] + } + } + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_with_rules_remote_securitygroup_id_and_remote_ip_prefix(self): + remote_securitygroup_id = "b755595b-3bdf-4152-8fb0-456d5e72eb01" + request_body = { + "securitygroup": { + "securitygrouprules": + [ + { + "protocol": "icmp", + "remote_securitygroup_id": remote_securitygroup_id, + "remote_ip_prefix": "192.168.0.0/16" + }, + ] + } + } + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_with_tcp_or_udp_rules_port_range_max_is_none(self): + remote_securitygroup_id = "b755595b-3bdf-4152-8fb0-456d5e72eb01" + request_body = { + "securitygroup": { + "securitygrouprules": + [ + { + "protocol": "tcp", + "remote_securitygroup_id": remote_securitygroup_id, + "remote_ip_prefix": "192.168.0.0/16" + }, + ] + } + } + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_with_tcp_or_udp_rules_port_range_max_is_over_65535(self): + remote_securitygroup_id = "b755595b-3bdf-4152-8fb0-456d5e72eb01" + request_body = { + "securitygroup": { + "securitygrouprules": + [ + { + "protocol": "tcp", + "remote_securitygroup_id": remote_securitygroup_id, + "remote_ip_prefix": "192.168.0.0/16", + "port_range_max": "65536" + }, + ] + } + } + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_with_tcp_or_udp_rules_port_range_max_is_zero(self): + remote_securitygroup_id = "b755595b-3bdf-4152-8fb0-456d5e72eb01" + request_body = { + "securitygroup": { + "securitygrouprules": + [ + { + "protocol": "tcp", + "remote_securitygroup_id": remote_securitygroup_id, + "remote_ip_prefix": "192.168.0.0/16", + "port_range_max": "0" + }, + ] + } + } + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_with_tcp_or_udp_rules_port_range_min_is_over_65535(self): + remote_securitygroup_id = "b755595b-3bdf-4152-8fb0-456d5e72eb01" + request_body = { + "securitygroup": { + "securitygrouprules": + [ + { + "protocol": "tcp", + "remote_securitygroup_id": remote_securitygroup_id, + "remote_ip_prefix": "192.168.0.0/16", + "port_range_max": "1", + "port_range_min": "65536" + }, + ] + } + } + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_with_tcp_or_udp_rules_port_range_min_is_higher_than_port_range_max(self): + remote_securitygroup_id = "b755595b-3bdf-4152-8fb0-456d5e72eb01" + request_body = { + "securitygroup": { + "securitygrouprules": + [ + { + "protocol": "tcp", + "remote_securitygroup_id": remote_securitygroup_id, + "remote_ip_prefix": "192.168.0.0/16", + "port_range_max": "1", + 
"port_range_min": "2" + }, + ] + } + } + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_create_with_tcp_or_udp_rules_port_range_min_is_zero(self): + remote_securitygroup_id = "b755595b-3bdf-4152-8fb0-456d5e72eb01" + request_body = { + "securitygroup": { + "securitygrouprules": + [ + { + "protocol": "tcp", + "remote_securitygroup_id": remote_securitygroup_id, + "remote_ip_prefix": "192.168.0.0/16", + "port_range_max": "1", + "port_range_min": "0" + }, + ] + } + } + + url = get_base_url(GID) + req = get_request(url, 'POST', request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_update(self): + request_body = { + "securitygroup": { + "is_default": "true" + } + } + expected = { + "securitygroup": { + "securitygroup_id": SECURITYGROUP_ID, + "gid": GID, + "user_id": "fake", + "project_id": "fake", + "neutron_securitygroup_id": "test_securitygroup", + "name": "test_securitygroup", + "is_default": True, + "status": "ACTIVE" + } + } + + url = get_base_url(GID) + "/" + SECURITYGROUP_ID + req = get_request(url, 'PUT', request_body) + res = req.get_response(self.app) + body = jsonutils.loads(res.body) + self.assertEqual(res.status_code, 200) + for key in request_body["securitygroup"]: + self.assertEqual(body["securitygroup"][key], expected["securitygroup"][key]) + + def test_update_invalid_format_gid(self): + request_body = { + "securitygroup": { + "is_default": "true", + } + } + + url = get_base_url("aaaaaaa") + "/" + SECURITYGROUP_ID + req = get_request(url, "PUT", request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_update_invalid_format_securitygroup_id(self): + request_body = { + "securitygroup": { + "is_default": "true", + } + } + + url = get_base_url(GID) + "/" + "aaaaa" + req = get_request(url, "PUT", request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_update_invalid_format_is_default(self): + request_body = { + "securitygroup": { + "is_default": "aaa", + } + } + + url = get_base_url(GID) + "/" + SECURITYGROUP_ID + req = get_request(url, "PUT", request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_update_without_is_default(self): + request_body = { + "securitygroup": { + "name": "aaa", + } + } + + url = get_base_url(GID) + "/" + SECURITYGROUP_ID + req = get_request(url, "PUT", request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_update_empty_body(self): + request_body = {"securitygroup": {}} + url = get_base_url(GID) + "/" + SECURITYGROUP_ID + req = get_request(url, "PUT", request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_update_no_body(self): + request_body = {} + url = get_base_url(GID) + "/" + SECURITYGROUP_ID + req = get_request(url, "PUT", request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_update_invalid_body(self): + request_body = [] + url = get_base_url(GID) + "/" + SECURITYGROUP_ID + req = get_request(url, "PUT", request_body) + res = req.get_response(self.app) + self.assertEqual(res.status_code, 400) + + def test_delete(self): + self.mox.StubOutWithMock(db, "securitygroup_get_by_securitygroup_id") + db.securitygroup_get_by_securitygroup_id(IsA(context.RequestContext), + GID, + SECURITYGROUP_ID)\ + 
.AndReturn({"processes":[]}) + scheduler_rpcapi.SchedulerAPI.select_destinations( + IsA(context.RequestContext), request_spec={}, filter_properties={})\ + .AndReturn({"host": "fake_host"}) + operator_rpcapi.ResourceOperatorAPI.securitygroup_delete( + IsA(context.RequestContext), "fake_host", neutron_securitygroup_id="test_securitygroup") + self.mox.ReplayAll() + + url = get_base_url(GID) + "/" + SECURITYGROUP_ID + req = get_request(url, "DELETE") + res = req.get_response(self.app) + self.assertEqual(res.status_code, 204) + + def test_delete_invalid_format_gid(self): + url = get_base_url("aaaaaaa") + "/" + SECURITYGROUP_ID + req = get_request(url, "DELETE") + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_delete_invalid_format_securitygroup_id(self): + url = get_base_url(GID) + "/" + "aaaaa" + req = get_request(url, "DELETE") + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_delete_securitygroup_not_found(self): + self.mox.StubOutWithMock(db, "securitygroup_get_by_securitygroup_id") + db.securitygroup_get_by_securitygroup_id(IsA(context.RequestContext), + GID, + SECURITYGROUP_ID)\ + .AndReturn({"processes":[]}) + self.mox.StubOutWithMock(db, "securitygroup_delete") + db.securitygroup_delete(IsA(context.RequestContext), GID, SECURITYGROUP_ID)\ + .AndRaise(exception.SecuritygroupNotFound(securitygroup_id=SECURITYGROUP_ID)) + self.mox.ReplayAll() + url = get_base_url(GID) + "/" + SECURITYGROUP_ID + req = get_request(url, "DELETE") + res = req.get_response(self.app) + self.assertEqual(res.status_code, 404) + + def test_delete_raise_exception_by_scheduler_rpcapi(self): + self.mox.StubOutWithMock(db, "securitygroup_get_by_securitygroup_id") + db.securitygroup_get_by_securitygroup_id(IsA(context.RequestContext), + GID, + SECURITYGROUP_ID)\ + .AndReturn({"processes":[]}) + scheduler_rpcapi.SchedulerAPI.select_destinations( + IsA(context.RequestContext), request_spec={}, filter_properties={})\ + .AndRaise(Exception()) + self.mox.ReplayAll() + + url = get_base_url(GID) + "/" + SECURITYGROUP_ID + req = get_request(url, "DELETE") + res = req.get_response(self.app) + self.assertEqual(res.status_code, 500) + + def test_delete_raise_exception_by_operator_rpcapi(self): + self.mox.StubOutWithMock(db, "securitygroup_get_by_securitygroup_id") + db.securitygroup_get_by_securitygroup_id(IsA(context.RequestContext), + GID, + SECURITYGROUP_ID)\ + .AndReturn({"processes":[]}) + scheduler_rpcapi.SchedulerAPI.select_destinations( + IsA(context.RequestContext), request_spec={}, filter_properties={})\ + .AndReturn({"host": "fake_host"}) + operator_rpcapi.ResourceOperatorAPI.securitygroup_delete( + IsA(context.RequestContext), "fake_host", neutron_securitygroup_id="test_securitygroup")\ + .AndRaise(Exception()) + self.mox.ReplayAll() + + url = get_base_url(GID) + "/" + SECURITYGROUP_ID + req = get_request(url, "DELETE") + res = req.get_response(self.app) + self.assertEqual(res.status_code, 500) + + def test_delete_raise_exception_securitygroup_inuse(self): + self.mox.StubOutWithMock(db, "securitygroup_get_by_securitygroup_id") + db.securitygroup_get_by_securitygroup_id(IsA(context.RequestContext), + GID, + SECURITYGROUP_ID)\ + .AndReturn({"processes":[{"pid":"pid"}]}) + self.mox.ReplayAll() + url = get_base_url(GID) + "/" + SECURITYGROUP_ID + req = get_request(url, "DELETE") + res = req.get_response(self.app) + self.assertEqual(res.status_code, 409) diff --git a/rack/tests/conf_fixture.py b/rack/tests/conf_fixture.py new file mode 100644 
index 0000000..b0dcea7 --- /dev/null +++ b/rack/tests/conf_fixture.py @@ -0,0 +1,41 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from oslo.config import cfg + + +from rack import config +from rack.openstack.common.fixture import config as config_fixture +from rack import paths +from rack.tests import utils + +CONF = cfg.CONF +CONF.import_opt('use_ipv6', 'rack.netconf') +CONF.import_opt('host', 'rack.netconf') +CONF.import_opt('policy_file', 'rack.policy') +CONF.import_opt('api_paste_config', 'rack.wsgi') + + +class ConfFixture(config_fixture.Config): + """Fixture to manage global conf settings.""" + def setUp(self): + super(ConfFixture, self).setUp() + self.conf.set_default('api_paste_config', + paths.state_path_def('etc/api-paste.ini')) + self.conf.set_default('host', 'fake-mini') + self.conf.set_default('connection', "sqlite://", group='database') + self.conf.set_default('sqlite_synchronous', False, group='database') + self.conf.set_default('use_ipv6', True) + config.parse_args([], default_config_files=[]) + self.addCleanup(utils.cleanup_dns_managers) diff --git a/rack/tests/db/__init__.py b/rack/tests/db/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/rack/tests/db/test_db_api.py b/rack/tests/db/test_db_api.py new file mode 100644 index 0000000..09f353c --- /dev/null +++ b/rack/tests/db/test_db_api.py @@ -0,0 +1,1056 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import mox + +from rack import context +from rack import db +from rack import exception +from rack import test +from rack.openstack.common.db import exception as db_exc +import uuid + + +class ModelsObjectComparatorMixin(object): + + def _dict_from_object(self, obj, ignored_keys): + if ignored_keys is None: + ignored_keys = [] + return dict([(k, v) for k, v in obj.iteritems() + if k not in ignored_keys]) + + def _assertEqualObjects(self, obj1, obj2, ignored_keys=None): + obj1 = self._dict_from_object(obj1, ignored_keys) + obj2 = self._dict_from_object(obj2, ignored_keys) + + self.assertEqual(len(obj1), + len(obj2), + "Keys mismatch: %s" % + str(set(obj1.keys()) ^ set(obj2.keys()))) + for key, value in obj1.iteritems(): + self.assertEqual(value, obj2[key]) + + def _assertEqualListsOfObjects(self, objs1, objs2, ignored_keys=None): + obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys) + sort_key = lambda d: [d[k] for k in sorted(d)] + conv_and_sort = lambda obj: sorted(map(obj_to_dict, obj), key=sort_key) + + self.assertEqual(conv_and_sort(objs1), conv_and_sort(objs2)) + + def _assertEqualOrderedListOfObjects(self, objs1, objs2, + ignored_keys=None): + obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys) + conv = lambda obj: map(obj_to_dict, obj) + + self.assertEqual(conv(objs1), conv(objs2)) + + def _assertEqualListsOfPrimitivesAsSets(self, primitives1, primitives2): + self.assertEqual(len(primitives1), len(primitives2)) + for primitive in primitives1: + self.assertIn(primitive, primitives2) + + for primitive in primitives2: + self.assertIn(primitive, primitives1) + + +class GroupTestCase(test.TestCase, ModelsObjectComparatorMixin): + + def setUp(self): + super(GroupTestCase, self).setUp() + self.ctxt = context.get_admin_context() + self.user_ctxt = context.RequestContext('user', 'user') + self.gid = unicode(uuid.uuid4()) + + def test_group_get_all(self): + # set test data + groups = [ + { + "display_name": "display_name_01", + "display_description": "display_description_01", + }, + { + "display_name": "display_name_02", + "display_description": "display_description_02", + }, + { + "display_name": "display_name_03", + "display_description": "display_description_03", + }, + { + "display_name": "display_name_04", + "display_description": "display_description_04", + } + ] + + # create test data to group table + user_ids = ["user_id_01", "user_id_02"] + created_groups_list = [] + for user_id in user_ids: + created_groups = [self._create_group(group, user_id=user_id, project_id=user_id) + for group in groups] + created_groups_list.append(created_groups) + + #test + ctext = context.RequestContext( + user_id=user_ids[0], project_id=user_ids[0]) + res_groups = db.group_get_all(ctext) + ignored_keys = ['deleted', 'deleted_at', 'updated_at', + 'created_at'] + self.assertEqual(len(res_groups), len(created_groups_list[0])) + for group in range(0, len(res_groups)): + self._assertEqualObjects(res_groups[group], created_groups_list[0][group], ignored_keys) + + def test_group_get_all_empty(self): + ctext = context.RequestContext( + user_id="user01", project_id="user01") + res_groups = db.group_get_all(ctext) + expected = [] + self.assertEqual(res_groups, expected) + + def test_group_get_by_gid(self): + # set test data + groups = [ + { + "display_name": "display_name_01", + "display_description": "display_description_01", + }, + { + "display_name": "display_name_02", + "display_description": "display_description_02", + }, + { + "display_name": "display_name_03", + 
"display_description": "display_description_03", + }, + { + "display_name": "display_name_04", + "display_description": "display_description_04", + } + ] + + # create test data to group table + user_id = "user_id_01" + created_groups = [self._create_group(group, user_id=user_id, project_id=user_id)for group in groups] + gid = created_groups[1]["gid"] + + #test + ctext = context.RequestContext( + user_id=user_id, project_id=user_id) + res_group = db.group_get_by_gid(ctext, gid) + ignored_keys = ['deleted', 'deleted_at', 'updated_at', + 'created_at'] + self._assertEqualObjects(res_group, created_groups[1], ignored_keys) + + def test_group_get_by_gid_not_found(self): + #test + user_id = "user_id_01" + ctext = context.RequestContext( + user_id=user_id, project_id=user_id) + gid="00000000-0000-0000-0000-000000000010" + status_code = 200 + try: + db.group_get_by_gid(ctext, gid) + except Exception as e: + status_code = e.code + self.assertEqual(status_code, 404) + + def _get_base_values(self): + return { + 'gid': 'fake_name', + 'user_id': 'fake_user_id', + 'project_id': 'fake_project_id', + 'display_name': 'fake_dispalay_name', + 'display_description': 'fake_display_description', + 'status': 'fake_status' + } + + def _create_group(self, values, user_id=None, project_id=None): + user_ctxt = context.RequestContext(user_id, project_id) + values['gid'] = unicode(uuid.uuid4()) + values['user_id'] = user_id + values['project_id'] = project_id + v = self._get_base_values() + v.update(values) + return db.group_create(user_ctxt, v) + + def test_group_create(self): + values = { + "gid": "12345678-1234-5678-9123-123456789012", + "user_id": "user", + "project_id": "user", + "display_name": "test_group", + "display_description": "This is test group", + "status": "active" + } + group = db.group_create(self.user_ctxt, values) + ignored_keys = ['deleted', 'deleted_at', 'updated_at', + 'created_at'] + values.update({"user_id": "user", + "project_id": "user", + "status": "active"}) + self.assertIsNotNone(group['gid']) + self._assertEqualObjects(group, values, ignored_keys) + + def test_group_update(self): + values_before = { + "gid": "12345678-1234-5678-9123-123456789012", + "user_id": "user", + "project_id": "user", + "display_name": "My_group", + "display_description": "This is my group.", + "status": "active" + } + group_before = db.group_create(self.user_ctxt, values_before) + values = { + "gid": group_before["gid"], + "display_name": "My_group_updated", + "display_description": "This is my group updated." 
+ } + group = db.group_update(self.user_ctxt, values) + ignored_keys = ['deleted', 'deleted_at', 'updated_at','created_at' + , "user_id", "project_id", "status"] + self._assertEqualObjects(group, values, ignored_keys) + + def test_group_delete(self): + values_before = { + "gid": self.gid, + "user_id": "user_id", + "project_id": "project_id", + "display_name": "My_group", + "display_description": "This is my group.", + "status": "active" + } + db.group_create(self.user_ctxt, values_before) + deleted_group = db.group_delete(self.ctxt, self.gid) + self.assertEqual(deleted_group["deleted"], 1) + self.assertEqual(deleted_group["status"], "DELETING") + self.assertIsNotNone(deleted_group.get("deleted_at")) + + def test_group_update_gid_not_found(self): + #test + values_before = { + "gid": "12345678-1234-5678-9123-123456789012", + "user_id": "user", + "project_id": "user", + "display_name": "My_group", + "display_description": "This is my group.", + "status": "active" + } + group_before = db.group_create(self.user_ctxt, values_before) + values = { + "gid": group_before["gid"] + "not-found", + "display_name": "My_group_updated", + "display_description": "This is my group updated." + } + try: + db.group_update(self.user_ctxt, values) + except Exception as e: + status_code = e.code + self.assertEqual(status_code, 404) + + def test_group_delete_not_found(self): + self.assertRaises(exception.GroupNotFound, + db.group_delete, + context=self.user_ctxt, + gid=self.gid) + +class ServiceTestCase(test.TestCase, ModelsObjectComparatorMixin): + def setUp(self): + super(ServiceTestCase, self).setUp() + self.ctxt = context.get_admin_context() + + def _get_base_values(self): + return { + 'host': 'fake_host', + 'binary': 'fake_binary', + 'topic': 'fake_topic', + 'report_count': 3, + 'disabled': False + } + + def _create_service(self, values): + v = self._get_base_values() + v.update(values) + return db.service_create(self.ctxt, v) + + def test_service_create(self): + service = self._create_service({}) + self.assertIsNotNone(service['id']) + for key, value in self._get_base_values().iteritems(): + self.assertEqual(value, service[key]) + + def test_service_destroy(self): + service1 = self._create_service({}) + service2 = self._create_service({'host': 'fake_host2'}) + + db.service_destroy(self.ctxt, service1['id']) + self.assertRaises(exception.ServiceNotFound, + db.service_get, self.ctxt, service1['id']) + self._assertEqualObjects(db.service_get(self.ctxt, service2['id']), + service2) + + def test_service_update(self): + service = self._create_service({}) + new_values = { + 'host': 'fake_host1', + 'binary': 'fake_binary1', + 'topic': 'fake_topic1', + 'report_count': 4, + 'disabled': True + } + db.service_update(self.ctxt, service['id'], new_values) + updated_service = db.service_get(self.ctxt, service['id']) + for key, value in new_values.iteritems(): + self.assertEqual(value, updated_service[key]) + + def test_service_update_not_found_exception(self): + self.assertRaises(exception.ServiceNotFound, + db.service_update, self.ctxt, 100500, {}) + + def test_service_get(self): + service1 = self._create_service({}) + self._create_service({'host': 'some_other_fake_host'}) + real_service1 = db.service_get(self.ctxt, service1['id']) + self._assertEqualObjects(service1, real_service1, + ignored_keys=['compute_node']) + + def test_service_get_not_found_exception(self): + self.assertRaises(exception.ServiceNotFound, + db.service_get, self.ctxt, 100500) + + def test_service_get_by_host_and_topic(self): + service1 = 
self._create_service({'host': 'host1', 'topic': 'topic1'}) + self._create_service({'host': 'host2', 'topic': 'topic2'}) + + real_service1 = db.service_get_by_host_and_topic(self.ctxt, + host='host1', + topic='topic1') + self._assertEqualObjects(service1, real_service1) + + def test_service_get_all(self): + values = [ + {'host': 'host1', 'topic': 'topic1'}, + {'host': 'host2', 'topic': 'topic2'}, + {'disabled': True} + ] + services = [self._create_service(vals) for vals in values] + disabled_services = [services[-1]] + non_disabled_services = services[:-1] + + compares = [ + (services, db.service_get_all(self.ctxt)), + (disabled_services, db.service_get_all(self.ctxt, True)), + (non_disabled_services, db.service_get_all(self.ctxt, False)) + ] + for comp in compares: + self._assertEqualListsOfObjects(*comp) + + def test_service_get_all_by_topic(self): + values = [ + {'host': 'host1', 'topic': 't1'}, + {'host': 'host2', 'topic': 't1'}, + {'disabled': True, 'topic': 't1'}, + {'host': 'host3', 'topic': 't2'} + ] + services = [self._create_service(vals) for vals in values] + expected = services[:2] + real = db.service_get_all_by_topic(self.ctxt, 't1') + self._assertEqualListsOfObjects(expected, real) + + def test_service_get_all_by_host(self): + values = [ + {'host': 'host1', 'topic': 't11', 'binary': 'b11'}, + {'host': 'host1', 'topic': 't12', 'binary': 'b12'}, + {'host': 'host2', 'topic': 't1'}, + {'host': 'host3', 'topic': 't1'} + ] + services = [self._create_service(vals) for vals in values] + + expected = services[:2] + real = db.service_get_all_by_host(self.ctxt, 'host1') + self._assertEqualListsOfObjects(expected, real) + + def test_service_get_by_args(self): + values = [ + {'host': 'host1', 'binary': 'a'}, + {'host': 'host2', 'binary': 'b'} + ] + services = [self._create_service(vals) for vals in values] + + service1 = db.service_get_by_args(self.ctxt, 'host1', 'a') + self._assertEqualObjects(services[0], service1) + + service2 = db.service_get_by_args(self.ctxt, 'host2', 'b') + self._assertEqualObjects(services[1], service2) + + def test_service_get_by_args_not_found_exception(self): + self.assertRaises(exception.HostBinaryNotFound, + db.service_get_by_args, + self.ctxt, 'non-exists-host', 'a') + + def test_service_binary_exists_exception(self): + db.service_create(self.ctxt, self._get_base_values()) + values = self._get_base_values() + values.update({'topic': 'top1'}) + self.assertRaises(exception.ServiceBinaryExists, db.service_create, + self.ctxt, values) + + def test_service_topic_exists_exceptions(self): + db.service_create(self.ctxt, self._get_base_values()) + values = self._get_base_values() + values.update({'binary': 'bin1'}) + self.assertRaises(exception.ServiceTopicExists, db.service_create, + self.ctxt, values) + + +class NetworksTestCase(test.TestCase, ModelsObjectComparatorMixin): + def setUp(self): + super(NetworksTestCase, self).setUp() + self.ctxt = context.get_admin_context() + + self.gid = unicode(uuid.uuid4()) + self.network_id = unicode(uuid.uuid4()) + self.neutron_network_id = unicode(uuid.uuid4()) + self.ext_router_id = unicode(uuid.uuid4()) + + def test_networks_create(self): + values = { + "network_id": self.network_id, + "gid": self.gid, + "neutron_network_id": "", + "is_admin": True, + "subnet": "10.0.0.0/24", + "ext_router":"", + "user_id": "user", + "project_id": "user", + "display_name": "net-" + self.network_id, + "status": "BUILDING", + "deleted": 0 + } + network = db.network_create(self.ctxt, values) + + ignored_keys = ['deleted', + 'deleted_at', + 
'updated_at', + 'created_at'] + self._assertEqualObjects(network, values, ignored_keys) + + def test_network_get_all(self): + values = { + "network_id": "", + "gid": self.gid, + "neutron_network_id": "", + "is_admin": True, + "subnet": "10.0.0.0/24", + "ext_router":"", + "user_id": "user", + "project_id": "user", + "display_name": "net-" + self.network_id, + "status": "BUILDING", + "deleted": 0 + } + for i in range(1, 5): + values["network_id"] = "network_id" + str(i) + db.network_create(self.ctxt, values) + + network_list = db.network_get_all(self.ctxt, self.gid) + for network in network_list: + self.assertEqual(network["gid"], self.gid) + + def test_network_get_all_return_empty_list(self): + network_list = db.network_get_all(self.ctxt, self.gid) + self.assertEqual(network_list, []) + + def test_network_get_by_network_id(self): + values = { + "network_id": "", + "gid": self.gid, + "neutron_network_id": "", + "is_admin": True, + "subnet": "10.0.0.0/24", + "ext_router":"", + "user_id": "user", + "project_id": "user", + "display_name": "net-" + self.network_id, + "status": "BUILDING", + "deleted": 0 + } + for i in range(1, 5): + values["network_id"] = "network_id" + str(i) + db.network_create(self.ctxt, values) + values["network_id"] = self.network_id + db.network_create(self.ctxt, values) + + network = db.network_get_by_network_id(self.ctxt, self.gid, self.network_id) + self.assertEqual(network["network_id"], self.network_id) + + def test_network_get_by_network_id_exception_notfound(self): + values = { + "network_id": "", + "gid": self.gid, + "neutron_network_id": "", + "is_admin": True, + "subnet": "10.0.0.0/24", + "ext_router":"", + "user_id": "user", + "project_id": "user", + "display_name": "net-" + self.network_id, + "status": "BUILDING", + "deleted": 0 + } + for i in range(1, 5): + values["network_id"] = "network_id" + str(i) + db.network_create(self.ctxt, values) + + self.assertRaises(exception.NetworkNotFound, + db.network_get_by_network_id, + context=self.ctxt, + gid=self.gid, + network_id=self.network_id) + + def test_networks_update(self): + create_values = { + "network_id": self.network_id, + "gid": self.gid, + "neutron_network_id": "", + "is_admin": True, + "subnet": "10.0.0.0/24", + "ext_router":"", + "user_id": "user", + "project_id": "user", + "display_name": "net-" + self.network_id, + "status": "BUILDING", + "deleted": 0 + } + create_network = db.network_create(self.ctxt, create_values) + create_network["status"] = "ACTIVE" + + update_values = { + "status": "ACTIVE" + } + db.network_update(self.ctxt, self.network_id, update_values) + + network = db.network_get_by_network_id(self.ctxt, self.gid, self.network_id) + ignored_keys = ['deleted', + 'deleted_at', + 'updated_at', + 'processes'] + self.assertIsNotNone(network["updated_at"]) + self._assertEqualObjects(network, create_network, ignored_keys) + + def test_network_delete(self): + create_values = { + "network_id": self.network_id, + "gid": self.gid, + "neutron_network_id": "", + "is_admin": True, + "subnet": "10.0.0.0/24", + "ext_router":"", + "user_id": "user", + "project_id": "user", + "display_name": "net-" + self.network_id, + "status": "BUILDING", + "deleted": 0 + } + db.network_create(self.ctxt, create_values) + deleted_network = db.network_delete(self.ctxt, self.gid, self.network_id) + self.assertEqual(deleted_network["deleted"], 1) + network_list = db.network_get_all(self.ctxt, self.gid) + self.assertEqual(network_list, []) + + +PRIVATE_KEY = ("-----BEGIN RSA PRIVATE KEY-----\nMIIEoAIBA" +
"AKCAQEA6W34Ak32uxp7Oh0rh1mCQclkw+NeqchAOhy" + "O/rcphFt280D9\nYXxdUa43i51IDS9VpyFFd10Cv4c" + "cynTPnky82CpGcuXCzaACzI/FHhmBeXTrFoXm\n682" + "b/8kXVQfCVfSjnvChxeeATjPu9GQkNrgyYyoubHxrr" + "W7fTaRLEz/Np9CvCq/F\nPJcsx7FwD0adFfmnulbZp" + "plunqMGKX2nYXbDlLi7Ykjd3KbH1PRJuu+sPYDz3Gm" + "Z\n4Z0naojOUDcajuMckN8RzNblBrksH8g6NDauoX5" + "hQa9dyd1q36403NW9tcE6ZwNp\n1GYCnN7/YgI/ugH" + "o30ptpBvGw1zuY5/+FkU7SQIBIwKCAQA8BlW3cyIwH" + "MCZ6j5k\nofzsWFu9V7lBmeShOosrji8/Srgv7CPl3" + "iaf+ZlBKHGc/YsNuBktUm5rw6hRUTyz\nrVUhpHiD8" + "fBDgOrG4yQPDd93AM68phbO67pmWEfUCU86rJ8aPeB" + "0t98qDVqz3zyD\nGWwK3vX+o6ao8J/SIu67zpP381d" + "/ZigDsq+yqhtPpz04YJ2W0w67NV6XSPOV1AX0\nYLn" + "iHMwfbSTdwJ/wVWoooIgbTo7ldPuBsKUwNIVW8H9tm" + "apVdyQxAS9JAkr1Y2si\nxKURN4Iez2oyCFv5+P1em" + "hoptgECr49kpOBAvhRfWWkumgR1azqynzTjSnpQVO6" + "2\nvQr7AoGBAPkYWJX0tFNlqIWw4tcHtcPHJkRwvLd" + "PUfM6Q0b6+YctKBmLoNJWBiXr\n39wiYnftSdJO+L9" + "6HAG38RrmeCfafz19EDPVXepAUYZDwnY1HGx7ZqbiP" + "wxYMN4C\n+Wg3LzuSh7d5fe409+TCtX4YqSVFQd9gl" + "8Ml3sKVOTxeaDROw6hFAoGBAO/mdJOr\nSGcAj9V99" + "df6IX8abZTPm2PmirT95WWwIYX4PRY//5iaCN6XyEK" + "Ix5TJk9lmcQhS\ntb++PTsXpea01WUcxqaOO3vG7PQ" + "hvAbpq8A4eMBZZiY9UyctCPNSMscPPNRU2r/C\ntAs" + "XRk6BNkiGofgn2MY5YBoPkEgiJmJWMKE1AoGAeP0yV" + "3bbPnM0mLUAdxJfmZs+\neQOO3LF/k2VxInnm6eK7t" + "KLntp7PyUauj35qV4HiBxBqMR4Nmm9JOPOZcnFxAJv" + "U\nq3ZDjwlMK0V7tcIGfdWJoYPVewZDnwjCSI/VHO9" + "mfbAJ91uOWStfd8LV0EY18Cea\nK5YNHK7hSTUrTJt" + "JFzcCgYB7YJO5qIuir9Txc/rG2Gj/ie82lqevuGSXm" + "ISaslpi\nJ+Tm3xW8MfXu0bdyrL5pxsEQuFdjXbyOf" + "xgtBNj6Tl8eDsyQK+QTxWPrRIyV10Ji\n2zbJUoxOL" + "irDsMLGR4fUFncOHQLJBQwi9gbmi5hCjmHtVlI6DuD" + "3dbfqlThP1I4J\nwwKBgHfbOPVCgcJA3733J+dBC8g" + "Lt5QT2fCZ2N7PtaHcsSrW/B9VlGP+tviEC59U\nbmp" + "OLADzAto1MZdRDr8uXByZ8/eI37Txn6YchMVp43uL2" + "+WaTdn9GBtOBpWJ0Pqi\nx3HBmILbvIEzB2BX11/PD" + "NGRMNcCy7edvnFMCxeAiW7DJqCb\n-----END RSA " + "PRIVATE KEY-----\n") + +class KeypairTestCase(test.TestCase, ModelsObjectComparatorMixin): + + def setUp(self): + super(KeypairTestCase, self).setUp() + self.ctxt = context.get_admin_context() + self.user_ctxt = context.RequestContext('user', 'user') + + def _get_base_values(self, gid): + return { + "keypair_id": "abcdefgh-ijkl-mnop-qrst-uvwxyzabcdef", + "gid": gid, + "user_id": self.user_ctxt.user_id, + "project_id": self.user_ctxt.project_id, + "nova_keypair_id": "nova-test-keypair", + "private_key": PRIVATE_KEY, + "display_name": "test_keypair", + "is_default": True, + "status": "BUILDING" + } + + def _create_group(self, gid): + values = { + "gid": gid, + "user_id": self.user_ctxt.user_id, + "project_id": self.user_ctxt.project_id, + "display_name": "test_group", + "dsplay_description": "This is test group.", + "is_default": False, + "status": "ACTIVE" + } + return db.group_create(self.user_ctxt, values) + + def _create_keypair(self, gid, values): + v = self._get_base_values(gid) + v.update(values) + return db.keypair_create(self.user_ctxt, v) + + def test_keypair_get_all(self): + gid = "12345678-1234-5678-9123-123456789012" + self._create_group(gid) + values = [ + {"keypair_id": unicode(uuid.uuid4()), + "display_name": "test_keypair1"}, + {"keypair_id": unicode(uuid.uuid4()), + "display_name": "test_keypair2"}, + {"keypair_id": unicode(uuid.uuid4()), + "display_name": "test_keypair3"}, + ] + keypairs = [self._create_keypair(gid, value) for value in values] + expected_keypairs = db.keypair_get_all(self.user_ctxt, gid) + self._assertEqualListsOfObjects(keypairs, expected_keypairs) + + def test_keypair_get_by_keypair_id(self): + gid = "12345678-1234-5678-9123-123456789012" + 
self._create_group(gid) + values = [ + {"keypair_id": unicode(uuid.uuid4()), + "display_name": "test_keypair1"}, + {"keypair_id": unicode(uuid.uuid4()), + "display_name": "test_keypair2"}, + ] + keypairs = [self._create_keypair(gid, value) for value in values] + expected = db.keypair_get_by_keypair_id(self.user_ctxt, gid, values[0]["keypair_id"]) + self._assertEqualObjects(keypairs[0], expected) + + def test_keypair_get_keypair_not_found(self): + gid = "12345678-1234-5678-9123-123456789012" + self._create_group(gid) + values = self._get_base_values(gid) + db.keypair_create(self.user_ctxt, values) + self.assertRaises(exception.KeypairNotFound, db.keypair_get_by_keypair_id, + self.user_ctxt, gid, "aaaaa") + + def test_keypair_create(self): + gid = "12345678-1234-5678-9123-123456789012" + self._create_group(gid) + + values = self._get_base_values(gid) + keypair = db.keypair_create(self.user_ctxt, values) + ignored_keys = ['deleted', 'deleted_at', 'updated_at', + 'created_at'] + self._assertEqualObjects(keypair, values, ignored_keys) + + def test_keypair_update(self): + gid = "12345678-1234-5678-9123-123456789012" + self._create_group(gid) + values_before = self._get_base_values(gid) + keypair = db.keypair_create(self.user_ctxt, values_before) + values = { + "is_default": False, + "status": "ACTIVE", + } + keypair_after = db.keypair_update(self.user_ctxt, gid, keypair["keypair_id"], values) + self.assertEqual(keypair_after["is_default"], False) + self.assertEqual(keypair_after["status"], "ACTIVE") + + def test_keypair_update_keypair_not_found(self): + gid = "12345678-1234-5678-9123-123456789012" + keypair_id = "12345678-1234-5678-9123-123456789012" + self.assertRaises(exception.KeypairNotFound, + db.keypair_update, + context=self.user_ctxt, + gid=gid, + keypair_id=keypair_id, + values={}) + + def test_keypair_delete(self): + gid = "12345678-1234-5678-9123-123456789012" + self._create_group(gid) + values_before = self._get_base_values(gid) + keypair = db.keypair_create(self.user_ctxt, values_before) + keypair_after = db.keypair_delete(self.user_ctxt, gid, keypair["keypair_id"]) + self.assertEquals("DELETING", keypair_after["status"]) + self.assertEquals(1, keypair_after["deleted"]) + self.assertIsNotNone(keypair_after.get("deleted_at")) + + def test_keypair_delete_not_found(self): + gid = "12345678-1234-5678-9123-123456789012" + keypair_id = "12345678-1234-5678-9123-123456789012" + self.assertRaises(exception.KeypairNotFound, + db.keypair_delete, + context=self.user_ctxt, + gid=gid, keypair_id=keypair_id) + +class SecuritygroupTestCase(test.TestCase, ModelsObjectComparatorMixin): + + def setUp(self): + super(SecuritygroupTestCase, self).setUp() + self.ctxt = context.get_admin_context() + self.user_ctxt = context.RequestContext('user', 'user') + + def _get_base_values(self, gid, securitygroup_id=None): + return { + "securitygroup_id": securitygroup_id or "abcdefgh-ijkl-mnop-qrst-uvwxyzabcdef", + "gid": gid, + "user_id": self.user_ctxt.user_id, + "project_id": self.user_ctxt.project_id, + "neutron_securitygroup_id": securitygroup_id or "neutron-test-securitygroup", + "display_name": "test_securitygroup", + "is_default": True, + "status": "BUILDING", + "deleted": 0 + } + + def _create_group(self, gid): + values = { + "gid": gid, + "user_id": self.user_ctxt.user_id, + "project_id": self.user_ctxt.project_id, + "display_name": "test_group", + "dsplay_description": "This is test group.", + "is_default": False, + "status": "ACTIVE", + "deleted": 0 + } + return db.group_create(self.user_ctxt, values) + 
+ def test_securitygroup_get_all(self): + group = self._create_group("gid1") + securitygroup_ids = ["sc1","sc2","sc3"] + securitygroups = [] + for securitygroup_id in securitygroup_ids: + securitygroup = db.securitygroup_create( + self.user_ctxt, self._get_base_values(group["gid"], securitygroup_id)) + securitygroups.append(securitygroup) + + res_securitygroups = db.securitygroup_get_all(context, group["gid"]) + ignored_keys = ['deleted_at', 'updated_at', 'created_at'] + self.assertEqual(len(res_securitygroups), len(securitygroups)) + for i in range(0, len(res_securitygroups)): + self._assertEqualObjects(res_securitygroups[i], securitygroups[i], ignored_keys) + + def test_securitygroup_get_all_empty(self): + res_securitygroups = db.securitygroup_get_all(context, "gid") + expected = [] + self.assertEqual(res_securitygroups, expected) + + def test_securitygroup_get_by_securitygroup_id(self): + group = self._create_group("gid1") + securitygroup_ids = ["sc1","sc2","sc3"] + securitygroups = [] + for securitygroup_id in securitygroup_ids: + securitygroup = db.securitygroup_create( + self.user_ctxt, self._get_base_values(group["gid"], securitygroup_id)) + securitygroups.append(securitygroup) + + res_securitygroup = db.securitygroup_get_by_securitygroup_id(self.user_ctxt, group["gid"], securitygroup_ids[0]) + ignored_keys = ['deleted_at', 'updated_at', 'created_at', 'processes'] + self._assertEqualObjects(res_securitygroup, securitygroups[0], ignored_keys) + + def test_securitygroup_get_by_securitygroup_id_not_found(self): + try: + db.securitygroup_get_by_securitygroup_id(self.user_ctxt, "gid", "sec") + except Exception as e: + status_code = e.code + self.assertEqual(status_code, 404) + + def test_securitygroup_create(self): + gid = "12345678-1234-5678-9123-123456789012" + self._create_group(gid) + + values = self._get_base_values(gid) + securitygroup = db.securitygroup_create(self.user_ctxt, values) + ignored_keys = ['deleted', 'deleted_at', 'updated_at', + 'created_at'] + self._assertEqualObjects(securitygroup, values, ignored_keys) + + def test_securitygroup_update(self): + gid = "12345678-1234-5678-9123-123456789012" + self._create_group(gid) + values_before = self._get_base_values(gid) + securitygroup = db.securitygroup_create(self.user_ctxt, values_before) + values = { + "is_default": False, + "status": "ACTIVE", + } + securitygroup_after = db.securitygroup_update(self.user_ctxt, gid, securitygroup["securitygroup_id"], values) + self.assertEqual(securitygroup_after["is_default"], False) + self.assertEqual(securitygroup_after["status"], "ACTIVE") + + def test_securitygroup_update_securitygroup_not_found(self): + gid = "12345678-1234-5678-9123-123456789012" + securitygroup_id = "12345678-1234-5678-9123-123456789012" + self.assertRaises(exception.SecuritygroupNotFound, + db.securitygroup_update, + context=self.user_ctxt, + gid=gid, + securitygroup_id=securitygroup_id, + values={}) + + def test_securitygroup_delete(self): + gid = "12345678-1234-5678-9123-123456789012" + self._create_group(gid) + values_before = self._get_base_values(gid) + securitygroup = db.securitygroup_create(self.user_ctxt, values_before) + securitygroup_after = db.securitygroup_delete(self.user_ctxt, gid, securitygroup["securitygroup_id"]) + self.assertEquals("DELETING", securitygroup_after["status"]) + self.assertEquals(1, securitygroup_after["deleted"]) + self.assertIsNotNone(securitygroup_after.get("deleted_at")) + + def test_securitygroup_delete_not_found(self): + gid = "12345678-1234-5678-9123-123456789012" + 
securitygroup_id = "12345678-1234-5678-9123-123456789012" + self.assertRaises(exception.SecuritygroupNotFound, + db.securitygroup_delete, + context=self.user_ctxt, + gid=gid, securitygroup_id=securitygroup_id) + + +class ProcessTestCase(test.TestCase, ModelsObjectComparatorMixin): + + def setUp(self): + super(ProcessTestCase, self).setUp() + self.ctxt = context.get_admin_context() + self.user_ctxt = context.RequestContext('user', 'user') + self.gid = unicode(uuid.uuid4()) + self.group = self._create_group(self.gid) + self.network = self._create_network(self.gid) + self.keypair = self._create_keypair(self.gid) + self.securitygroup = self._create_securitygroup(self.gid) + + def _get_base_values(self): + return { + "pid": unicode(uuid.uuid4()), + "ppid": unicode(uuid.uuid4()), + "nova_instance_id": unicode(uuid.uuid4()), + "glance_image_id": unicode(uuid.uuid4()), + "nova_flavor_id": 1, + "keypair_id": self.keypair["keypair_id"], + "gid": self.gid, + "user_id": self.user_ctxt.user_id, + "project_id": self.user_ctxt.project_id, + "display_name": "test_process", + "status": "BUILDING", + "deleted": 0 + } + + def _create_group(self, gid): + values = { + "gid": gid, + "user_id": self.user_ctxt.user_id, + "project_id": self.user_ctxt.project_id, + "display_name": "test_group", + "dsplay_description": "This is test group.", + "is_default": False, + "status": "ACTIVE", + "deleted": 0 + } + return db.group_create(self.user_ctxt, values) + + def _create_network(self, gid): + values = { + "gid": gid, + "network_id": unicode(uuid.uuid4()), + "ext_router": unicode(uuid.uuid4()), + "subnet": "10.0.0.1/24", + "user_id": self.user_ctxt.user_id, + "project_id": self.user_ctxt.project_id, + "display_name": "test_network", + "is_admin": False, + "status": "ACTIVE", + "deleted": 0 + } + return db.network_create(self.user_ctxt, values) + + def _create_keypair(self, gid): + values = { + "gid": gid, + "keypair_id": unicode(uuid.uuid4()), + "private_key": "test", + "user_id": self.user_ctxt.user_id, + "project_id": self.user_ctxt.project_id, + "display_name": "test_keypair", + "is_default": False, + "status": "ACTIVE", + "deleted": 0 + } + return db.keypair_create(self.user_ctxt, values) + + def _create_securitygroup(self, gid): + values = { + "gid": gid, + "securitygroup_id": unicode(uuid.uuid4()), + "user_id": self.user_ctxt.user_id, + "project_id": self.user_ctxt.project_id, + "display_name": "test_securitygroup", + "is_default": False, + "status": "ACTIVE", + "deleted": 0 + } + return db.securitygroup_create(self.user_ctxt, values) + + def _create_process(self, gid, create_count): + processes = [] + for i in range(0, create_count): + process = db.process_create( + self.user_ctxt, + self._get_base_values(), + [self.network["network_id"]], + [self.securitygroup["securitygroup_id"]]) + processes.append(process) + return processes + + def test_process_get_all(self): + processes = self._create_process(self.gid, 3) + res_processes = db.process_get_all(context, self.gid) + ignored_keys = ['deleted_at', 'updated_at', 'created_at'] + self.assertEqual(len(res_processes), len(processes)) + for i in range(0, len(res_processes)): + self._assertEqualObjects(res_processes[i], processes[i], ignored_keys) + + def test_process_get_all_empty(self): + res_processes = db.process_get_all(context, self.gid) + expected = [] + self.assertEqual(res_processes, expected) + + def test_process_get_by_pid(self): + processes = self._create_process(self.gid, 3) + res_process = db.process_get_by_pid(self.user_ctxt, self.gid, 
processes[0]["pid"]) + ignored_keys = ['deleted_at', 'updated_at', 'created_at'] + self._assertEqualObjects(res_process, processes[0], ignored_keys) + + def test_process_get_by_pid_not_found(self): + try: + db.process_get_by_pid(self.user_ctxt, self.gid, "notfound-pid") + except Exception as e: + status_code = e.code + self.assertEqual(status_code, 404) + + def test_process_create(self): + values = self._get_base_values() + process = db.process_create(self.user_ctxt, + values, + [self.network["network_id"]], + [self.securitygroup["securitygroup_id"]]) + + values["networks"] = [self.network] + values["securitygroups"] = [self.securitygroup] + ignored_keys = ['deleted', 'deleted_at', 'updated_at', + 'created_at'] + + self._assertEqualObjects(process, values, ignored_keys) + + def test_process_create_duplicated_network_id(self): + values = self._get_base_values() + try: + db.process_create(self.user_ctxt, + values, + [self.network["network_id"], self.network["network_id"]], + [self.securitygroup["securitygroup_id"]]) + except exception.InvalidInput as e: + status_code = e.code + self.assertEqual(status_code, 400) + + def test_process_create_duplicated_securitygroup_id(self): + values = self._get_base_values() + try: + db.process_create(self.user_ctxt, + values, + [self.network["network_id"]], + [self.securitygroup["securitygroup_id"], self.securitygroup["securitygroup_id"]]) + except exception.InvalidInput as e: + status_code = e.code + self.assertEqual(status_code, 400) + + def test_process_update(self): + values_before = self._get_base_values() + process = db.process_create(self.user_ctxt, + values_before, + [self.network["network_id"]], + [self.securitygroup["securitygroup_id"]]) + values = { + "display_name": "test", + "status": "ACTIVE", + } + process_after = db.process_update(self.user_ctxt, self.gid, process["pid"], values) + self.assertEqual(process_after["display_name"], "test") + self.assertEqual(process_after["status"], "ACTIVE") + + def test_process_update_process_not_found(self): + self.assertRaises(exception.ProcessNotFound, + db.process_update, + context=self.user_ctxt, + gid=self.gid, + pid=unicode(uuid.uuid4()), + values={}) + + def test_process_delete(self): + values_before = self._get_base_values() + process = db.process_create(self.user_ctxt, + values_before, + [self.network["network_id"]], + [self.securitygroup["securitygroup_id"]]) + process_after = db.process_delete(self.user_ctxt, self.gid, process["pid"]) + self.assertEquals("DELETING", process_after["status"]) + self.assertEquals(1, process_after["deleted"]) + self.assertIsNotNone(process_after.get("deleted_at")) + + def test_process_delete_not_found(self): + self.assertRaises(exception.ProcessNotFound, + db.process_delete, + context=self.user_ctxt, + gid=self.gid, pid=unicode(uuid.uuid4())) diff --git a/rack/tests/db/test_migrations.conf b/rack/tests/db/test_migrations.conf new file mode 100644 index 0000000..310b705 --- /dev/null +++ b/rack/tests/db/test_migrations.conf @@ -0,0 +1,26 @@ +[unit_tests] +# Set up any number of databases to test concurrently. +# The "name" used in the test is the config variable key. + +# A few tests rely on one sqlite database with 'sqlite' as the key. + +sqlite=sqlite:// +#sqlitefile=sqlite:///test_migrations_utils.db +#mysql=mysql+mysqldb://user:pass@localhost/test_migrations_utils +#postgresql=postgresql+psycopg2://user:pass@localhost/test_migrations_utils + +[migration_dbs] +# Migration DB details are listed separately as they can't be connected to +# concurrently. 
These databases can't be the same as above + +# Note, sqlite:// is in-memory and unique each time it is spawned. +# However file sqlite's are not unique. + +sqlite=sqlite:// +#sqlitefile=sqlite:///test_migrations.db +#mysql=mysql+mysqldb://user:pass@localhost/test_migrations +#postgresql=postgresql+psycopg2://user:pass@localhost/test_migrations + +[walk_style] +snake_walk=yes +downgrade=yes diff --git a/rack/tests/db/test_migrations.py b/rack/tests/db/test_migrations.py new file mode 100644 index 0000000..c1ad94d --- /dev/null +++ b/rack/tests/db/test_migrations.py @@ -0,0 +1,605 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Tests for database migrations. This test case reads the configuration +file test_migrations.conf for database connection settings +to use in the tests. For each connection found in the config file, +the test case runs a series of test cases to ensure that migrations work +properly both upgrading and downgrading, and that no data loss occurs +if possible. + +There are also "opportunistic" tests for both mysql and postgresql in here, +which allows testing against all 3 databases (sqlite in memory, mysql, pg) in +a properly configured unit test environment. + +For the opportunistic testing you need to set up db's named 'openstack_citest' +and 'openstack_baremetal_citest' with user 'openstack_citest' and password +'openstack_citest' on localhost. The test will then use that db and u/p combo +to run the tests. 
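+
+For MySQL an equivalent setup might look like the following (illustrative
+only, not part of this change; adjust the host and grants to your own
+environment):
+
+mysql -u root -p
+mysql> create database openstack_citest;
+mysql> grant all privileges on openstack_citest.* to
+    'openstack_citest'@'localhost' identified by 'openstack_citest';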
+ +For postgres on Ubuntu this can be done with the following commands: + +sudo -u postgres psql +postgres=# create user openstack_citest with createdb login password + 'openstack_citest'; +postgres=# create database openstack_citest with owner openstack_citest; +postgres=# create database openstack_baremetal_citest with owner + openstack_citest; + +""" + +import ConfigParser +import glob +import os + +from migrate.versioning import repository +import six.moves.urllib.parse as urlparse +import sqlalchemy +import sqlalchemy.exc + +import rack.db.sqlalchemy.migrate_repo +from rack.db.sqlalchemy import utils as db_utils +from rack.openstack.common.db.sqlalchemy import utils as oslodbutils +from rack.openstack.common.gettextutils import _ +from rack.openstack.common import log as logging +from rack.openstack.common import processutils +from rack import test +from rack import utils + + +LOG = logging.getLogger(__name__) + + +def _have_mysql(user, passwd, database): + present = os.environ.get('RACK_TEST_MYSQL_PRESENT') + if present is None: + return oslodbutils.is_backend_avail('mysql+mysqldb', database, + user, passwd) + return present.lower() in ('', 'true') + + +def _have_postgresql(user, passwd, database): + present = os.environ.get('RACK_TEST_POSTGRESQL_PRESENT') + if present is None: + return oslodbutils.is_backend_avail('postgresql+psycopg2', database, + user, passwd) + return present.lower() in ('', 'true') + + +def get_mysql_connection_info(conn_pieces): + database = conn_pieces.path.strip('/') + loc_pieces = conn_pieces.netloc.split('@') + host = loc_pieces[1] + auth_pieces = loc_pieces[0].split(':') + user = auth_pieces[0] + password = "" + if len(auth_pieces) > 1: + if auth_pieces[1].strip(): + password = "-p\"%s\"" % auth_pieces[1] + + return (user, password, database, host) + + +def get_pgsql_connection_info(conn_pieces): + database = conn_pieces.path.strip('/') + loc_pieces = conn_pieces.netloc.split('@') + host = loc_pieces[1] + + auth_pieces = loc_pieces[0].split(':') + user = auth_pieces[0] + password = "" + if len(auth_pieces) > 1: + password = auth_pieces[1].strip() + + return (user, password, database, host) + + +class CommonTestsMixIn(object): + """These tests are shared between TestRackMigrations and + TestBaremetalMigrations. + + BaseMigrationTestCase is effectively an abstract class, meant to be derived + from and not directly tested against; that's why these `test_` methods need + to be on a Mixin, so that they won't be picked up as valid tests for + BaseMigrationTestCase. + """ + def test_walk_versions(self): + for key, engine in self.engines.items(): + # We start each walk with a completely blank slate. 
+ self._reset_database(key) + self._walk_versions(engine, self.snake_walk, self.downgrade) + + def test_mysql_opportunistically(self): + self._test_mysql_opportunistically() + + def test_mysql_connect_fail(self): + """Test that we can trigger a mysql connection failure and we fail + gracefully to ensure we don't break people without mysql + """ + if oslodbutils.is_backend_avail('mysql+mysqldb', self.DATABASE, + "openstack_cifail", self.PASSWD): + self.fail("Shouldn't have connected") + + def test_postgresql_opportunistically(self): + self._test_postgresql_opportunistically() + + def test_postgresql_connect_fail(self): + """Test that we can trigger a postgres connection failure and we fail + gracefully to ensure we don't break people without postgres + """ + if oslodbutils.is_backend_avail('postgresql+psycopg2', self.DATABASE, + "openstack_cifail", self.PASSWD): + self.fail("Shouldn't have connected") + + +class BaseMigrationTestCase(test.NoDBTestCase): + """Base class for testing migrations and migration utils. This sets up + and configures the databases to run tests against. + """ + + # NOTE(jhesketh): It is expected that tests clean up after themselves. + # This is necessary for concurrency to allow multiple tests to work on + # one database. + # The full migration walk tests however do call the old _reset_databases() + # to throw away whatever was there so they need to operate on their own + # database that we know isn't accessed concurrently. + # Hence, BaseWalkMigrationTestCase overwrites the engine list. + + USER = None + PASSWD = None + DATABASE = None + + TIMEOUT_SCALING_FACTOR = 2 + + def __init__(self, *args, **kwargs): + super(BaseMigrationTestCase, self).__init__(*args, **kwargs) + + self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__), + 'test_migrations.conf') + # Test machines can set the RACK_TEST_MIGRATIONS_CONF variable + # to override the location of the config file for migration testing + self.CONFIG_FILE_PATH = os.environ.get('RACK_TEST_MIGRATIONS_CONF', + self.DEFAULT_CONFIG_FILE) + self.MIGRATE_FILE = rack.db.sqlalchemy.migrate_repo.__file__ + self.REPOSITORY = repository.Repository( + os.path.abspath(os.path.dirname(self.MIGRATE_FILE))) + self.INIT_VERSION = 0 + + self.snake_walk = False + self.downgrade = False + self.test_databases = {} + self.migration = None + self.migration_api = None + + def setUp(self): + super(BaseMigrationTestCase, self).setUp() + self._load_config() + + def _load_config(self): + # Load test databases from the config file. Only do this + # once. No need to re-run this on each test... + LOG.debug('config_path is %s' % self.CONFIG_FILE_PATH) + if os.path.exists(self.CONFIG_FILE_PATH): + cp = ConfigParser.RawConfigParser() + try: + cp.read(self.CONFIG_FILE_PATH) + config = cp.options('unit_tests') + for key in config: + self.test_databases[key] = cp.get('unit_tests', key) + self.snake_walk = cp.getboolean('walk_style', 'snake_walk') + self.downgrade = cp.getboolean('walk_style', 'downgrade') + + except ConfigParser.ParsingError as e: + self.fail("Failed to read test_migrations.conf config " + "file. Got error: %s" % e) + else: + self.fail("Failed to find test_migrations.conf config " + "file.") + + self.engines = {} + for key, value in self.test_databases.items(): + self.engines[key] = sqlalchemy.create_engine(value) + + # NOTE(jhesketh): We only need to make sure the databases are created + # not necessarily clean of tables. 
+ self._create_databases() + + def execute_cmd(self, cmd=None): + out, err = processutils.trycmd(cmd, shell=True, discard_warnings=True) + output = out or err + LOG.debug(output) + self.assertEqual('', err, + "Failed to run: %s\n%s" % (cmd, output)) + + @utils.synchronized('pgadmin', external=True) + def _reset_pg(self, conn_pieces): + (user, password, database, host) = \ + get_pgsql_connection_info(conn_pieces) + os.environ['PGPASSWORD'] = password + os.environ['PGUSER'] = user + # note(boris-42): We must create and drop database, we can't + # drop database which we have connected to, so for such + # operations there is a special database template1. + sqlcmd = ("psql -w -U %(user)s -h %(host)s -c" + " '%(sql)s' -d template1") + sqldict = {'user': user, 'host': host} + + sqldict['sql'] = ("drop database if exists %s;") % database + droptable = sqlcmd % sqldict + self.execute_cmd(droptable) + + sqldict['sql'] = ("create database %s;") % database + createtable = sqlcmd % sqldict + self.execute_cmd(createtable) + + os.unsetenv('PGPASSWORD') + os.unsetenv('PGUSER') + + @utils.synchronized('mysql', external=True) + def _reset_mysql(self, conn_pieces): + # We can execute the MySQL client to destroy and re-create + # the MYSQL database, which is easier and less error-prone + # than using SQLAlchemy to do this via MetaData...trust me. + (user, password, database, host) = \ + get_mysql_connection_info(conn_pieces) + sql = ("drop database if exists %(database)s; " + "create database %(database)s;" % {'database': database}) + cmd = ("mysql -u \"%(user)s\" %(password)s -h %(host)s " + "-e \"%(sql)s\"" % {'user': user, 'password': password, + 'host': host, 'sql': sql}) + self.execute_cmd(cmd) + + @utils.synchronized('sqlite', external=True) + def _reset_sqlite(self, conn_pieces): + # We can just delete the SQLite database, which is + # the easiest and cleanest solution + db_path = conn_pieces.path.strip('/') + if os.path.exists(db_path): + os.unlink(db_path) + # No need to recreate the SQLite DB. SQLite will + # create it for us if it's not there... 
+
+    def _create_databases(self):
+        """Create all configured databases as needed."""
+        for key, engine in self.engines.items():
+            self._create_database(key)
+
+    def _create_database(self, key):
+        """Create database if it doesn't exist."""
+        conn_string = self.test_databases[key]
+        conn_pieces = urlparse.urlparse(conn_string)
+
+        if conn_string.startswith('mysql'):
+            (user, password, database, host) = \
+                get_mysql_connection_info(conn_pieces)
+            sql = "create database if not exists %s;" % database
+            cmd = ("mysql -u \"%(user)s\" %(password)s -h %(host)s "
+                   "-e \"%(sql)s\"" % {'user': user, 'password': password,
+                                       'host': host, 'sql': sql})
+            self.execute_cmd(cmd)
+        elif conn_string.startswith('postgresql'):
+            (user, password, database, host) = \
+                get_pgsql_connection_info(conn_pieces)
+            os.environ['PGPASSWORD'] = password
+            os.environ['PGUSER'] = user
+
+            sqlcmd = ("psql -w -U %(user)s -h %(host)s -c"
+                      " '%(sql)s' -d template1")
+
+            sql = ("create database if not exists %s;") % database
+            createtable = sqlcmd % {'user': user, 'host': host, 'sql': sql}
+            # 0 means the database was created
+            # 256 means it already exists (which is fine)
+            # otherwise raise an error
+            out, err = processutils.trycmd(createtable, shell=True,
+                                           check_exit_code=[0, 256],
+                                           discard_warnings=True)
+            output = out or err
+            if err != '':
+                self.fail("Failed to run: %s\n%s" % (createtable, output))
+
+            os.unsetenv('PGPASSWORD')
+            os.unsetenv('PGUSER')
+
+    def _reset_databases(self):
+        """Reset all configured databases."""
+        for key, engine in self.engines.items():
+            self._reset_database(key)
+
+    def _reset_database(self, key):
+        """Reset specific database."""
+        engine = self.engines[key]
+        conn_string = self.test_databases[key]
+        conn_pieces = urlparse.urlparse(conn_string)
+        engine.dispose()
+        if conn_string.startswith('sqlite'):
+            self._reset_sqlite(conn_pieces)
+        elif conn_string.startswith('mysql'):
+            self._reset_mysql(conn_pieces)
+        elif conn_string.startswith('postgresql'):
+            self._reset_pg(conn_pieces)
+
+
+class BaseWalkMigrationTestCase(BaseMigrationTestCase):
+    """BaseWalkMigrationTestCase loads in an alternative set of databases for
+    testing against. This is necessary because the full migration walks reset
+    their databases, so they cannot share the default set with concurrently
+    running tests without interfering with each other. It is expected that
+    databases listed under [migration_dbs] in the configuration are only being
+    accessed by one test at a time. Currently only test_walk_versions accesses
+    the databases (and is the only method that calls _reset_database(), which
+    is clearly problematic for concurrency).
+    """
+
+    def _load_config(self):
+        # Load test databases from the config file. Only do this
+        # once. No need to re-run this on each test...
+        LOG.debug('config_path is %s' % self.CONFIG_FILE_PATH)
+        if os.path.exists(self.CONFIG_FILE_PATH):
+            cp = ConfigParser.RawConfigParser()
+            try:
+                cp.read(self.CONFIG_FILE_PATH)
+                config = cp.options('migration_dbs')
+                for key in config:
+                    self.test_databases[key] = cp.get('migration_dbs', key)
+                self.snake_walk = cp.getboolean('walk_style', 'snake_walk')
+                self.downgrade = cp.getboolean('walk_style', 'downgrade')
+            except ConfigParser.ParsingError as e:
+                self.fail("Failed to read test_migrations.conf config "
+                          "file. 
Got error: %s" % e) + else: + self.fail("Failed to find test_migrations.conf config " + "file.") + + self.engines = {} + for key, value in self.test_databases.items(): + self.engines[key] = sqlalchemy.create_engine(value) + + self._create_databases() + + def _test_mysql_opportunistically(self): + # Test that table creation on mysql only builds InnoDB tables + if not _have_mysql(self.USER, self.PASSWD, self.DATABASE): + self.skipTest("mysql not available") + # add this to the global lists to make reset work with it, it's removed + # automatically in tearDown so no need to clean it up here. + connect_string = oslodbutils.get_connect_string( + "mysql+mysqldb", self.DATABASE, self.USER, self.PASSWD) + (user, password, database, host) = \ + get_mysql_connection_info(urlparse.urlparse(connect_string)) + engine = sqlalchemy.create_engine(connect_string) + self.engines[database] = engine + self.test_databases[database] = connect_string + + # build a fully populated mysql database with all the tables + self._reset_database(database) + self._walk_versions(engine, self.snake_walk, self.downgrade) + + connection = engine.connect() + # sanity check + total = connection.execute("SELECT count(*) " + "from information_schema.TABLES " + "where TABLE_SCHEMA='%(database)s'" % + {'database': database}) + self.assertTrue(total.scalar() > 0, "No tables found. Wrong schema?") + + noninnodb = connection.execute("SELECT count(*) " + "from information_schema.TABLES " + "where TABLE_SCHEMA='%(database)s' " + "and ENGINE!='InnoDB' " + "and TABLE_NAME!='migrate_version'" % + {'database': database}) + count = noninnodb.scalar() + self.assertEqual(count, 0, "%d non InnoDB tables created" % count) + connection.close() + + del(self.engines[database]) + del(self.test_databases[database]) + + def _test_postgresql_opportunistically(self): + # Test postgresql database migration walk + if not _have_postgresql(self.USER, self.PASSWD, self.DATABASE): + self.skipTest("postgresql not available") + # add this to the global lists to make reset work with it, it's removed + # automatically in tearDown so no need to clean it up here. + connect_string = oslodbutils.get_connect_string( + "postgresql+psycopg2", self.DATABASE, self.USER, self.PASSWD) + engine = sqlalchemy.create_engine(connect_string) + (user, password, database, host) = \ + get_pgsql_connection_info(urlparse.urlparse(connect_string)) + self.engines[database] = engine + self.test_databases[database] = connect_string + + # build a fully populated postgresql database with all the tables + self._reset_database(database) + self._walk_versions(engine, self.snake_walk, self.downgrade) + del(self.engines[database]) + del(self.test_databases[database]) + + def _walk_versions(self, engine=None, snake_walk=False, downgrade=True): + # Determine latest version script from the repo, then + # upgrade from 1 through to the latest, with no data + # in the databases. This just checks that the schema itself + # upgrades successfully. 
+ + # Place the database under version control + self.migration_api.version_control(engine, + self.REPOSITORY, + self.INIT_VERSION) + self.assertEqual(self.INIT_VERSION, + self.migration_api.db_version(engine, + self.REPOSITORY)) + + LOG.debug('latest version is %s' % self.REPOSITORY.latest) + versions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1) + + for version in versions: + # upgrade -> downgrade -> upgrade + self._migrate_up(engine, version, with_data=True) + if snake_walk: + downgraded = self._migrate_down( + engine, version - 1, with_data=True) + if downgraded: + self._migrate_up(engine, version) + + if downgrade: + # Now walk it back down to 0 from the latest, testing + # the downgrade paths. + for version in reversed(versions): + # downgrade -> upgrade -> downgrade + downgraded = self._migrate_down(engine, version - 1) + + if snake_walk and downgraded: + self._migrate_up(engine, version) + self._migrate_down(engine, version - 1) + + def _migrate_down(self, engine, version, with_data=False): + try: + self.migration_api.downgrade(engine, self.REPOSITORY, version) + except NotImplementedError: + # NOTE(sirp): some migrations, namely release-level + # migrations, don't support a downgrade. + return False + + self.assertEqual(version, + self.migration_api.db_version(engine, + self.REPOSITORY)) + + # NOTE(sirp): `version` is what we're downgrading to (i.e. the 'target' + # version). So if we have any downgrade checks, they need to be run for + # the previous (higher numbered) migration. + if with_data: + post_downgrade = getattr( + self, "_post_downgrade_%03d" % (version + 1), None) + if post_downgrade: + post_downgrade(engine) + + return True + + def _migrate_up(self, engine, version, with_data=False): + """migrate up to a new version of the db. + + We allow for data insertion and post checks at every + migration version with special _pre_upgrade_### and + _check_### functions in the main test. 
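+
+        An illustrative hook pair (a sketch only, not part of this change)
+        could look like:
+
+            def _pre_upgrade_006(self, engine):
+                # seed data the 006 (processes table) migration must handle
+                return {"gid": "fake_gid"}
+
+            def _check_006(self, engine, data):
+                self.assertColumnExists(engine, 'processes', 'pid')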
+ """ + # NOTE(sdague): try block is here because it's impossible to debug + # where a failed data migration happens otherwise + try: + if with_data: + data = None + pre_upgrade = getattr( + self, "_pre_upgrade_%03d" % version, None) + if pre_upgrade: + data = pre_upgrade(engine) + + self.migration_api.upgrade(engine, self.REPOSITORY, version) + self.assertEqual(version, + self.migration_api.db_version(engine, + self.REPOSITORY)) + if with_data: + check = getattr(self, "_check_%03d" % version, None) + if check: + check(engine, data) + except Exception: + LOG.error("Failed to migrate to version %s on engine %s" % + (version, engine)) + raise + + +class TestRackMigrations(BaseWalkMigrationTestCase, CommonTestsMixIn): + """Test sqlalchemy-migrate migrations.""" + USER = "openstack_citest" + PASSWD = "openstack_citest" + DATABASE = "openstack_citest" + + def __init__(self, *args, **kwargs): + super(TestRackMigrations, self).__init__(*args, **kwargs) + + self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__), + 'test_migrations.conf') + # Test machines can set the RACK_TEST_MIGRATIONS_CONF variable + # to override the location of the config file for migration testing + self.CONFIG_FILE_PATH = os.environ.get('RACK_TEST_MIGRATIONS_CONF', + self.DEFAULT_CONFIG_FILE) + self.MIGRATE_FILE = rack.db.sqlalchemy.migrate_repo.__file__ + self.REPOSITORY = repository.Repository( + os.path.abspath(os.path.dirname(self.MIGRATE_FILE))) + + def setUp(self): + super(TestRackMigrations, self).setUp() + + if self.migration is None: + self.migration = __import__('rack.db.migration', + globals(), locals(), ['db_initial_version'], -1) + self.INIT_VERSION = self.migration.db_initial_version() + if self.migration_api is None: + temp = __import__('rack.db.sqlalchemy.migration', + globals(), locals(), ['versioning_api'], -1) + self.migration_api = temp.versioning_api + + def assertColumnExists(self, engine, table, column): + t = oslodbutils.get_table(engine, table) + self.assertIn(column, t.c) + + def assertColumnNotExists(self, engine, table, column): + t = oslodbutils.get_table(engine, table) + self.assertNotIn(column, t.c) + + def assertTableNotExists(self, engine, table): + self.assertRaises(sqlalchemy.exc.NoSuchTableError, + oslodbutils.get_table, engine, table) + + def assertIndexExists(self, engine, table, index): + t = oslodbutils.get_table(engine, table) + index_names = [idx.name for idx in t.indexes] + self.assertIn(index, index_names) + + def assertIndexMembers(self, engine, table, index, members): + self.assertIndexExists(engine, table, index) + + t = oslodbutils.get_table(engine, table) + index_columns = None + for idx in t.indexes: + if idx.name == index: + index_columns = idx.columns.keys() + break + + self.assertEqual(sorted(members), sorted(index_columns)) + + +class ProjectTestCase(test.NoDBTestCase): + + def test_all_migrations_have_downgrade(self): + topdir = os.path.normpath(os.path.dirname(__file__) + '/../../../') + py_glob = os.path.join(topdir, "rack", "db", "sqlalchemy", + "migrate_repo", "versions", "*.py") + + missing_downgrade = [] + for path in glob.iglob(py_glob): + has_upgrade = False + has_downgrade = False + with open(path, "r") as f: + for line in f: + if 'def upgrade(' in line: + has_upgrade = True + if 'def downgrade(' in line: + has_downgrade = True + + if has_upgrade and not has_downgrade: + fname = os.path.basename(path) + missing_downgrade.append(fname) + + helpful_msg = (_("The following migrations are missing a downgrade:" + "\n\t%s") % 
'\n\t'.join(sorted(missing_downgrade))) + self.assertTrue(not missing_downgrade, helpful_msg) diff --git a/rack/tests/fake_policy.py b/rack/tests/fake_policy.py new file mode 100644 index 0000000..117870a --- /dev/null +++ b/rack/tests/fake_policy.py @@ -0,0 +1,377 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +policy_data = """ +{ + "admin_api": "is_admin:True", + + "cells_scheduler_filter:TargetCellFilter": "is_admin:True", + + "context_is_admin": "role:admin or role:administrator", + "compute:create": "", + "compute:create:attach_network": "", + "compute:create:attach_volume": "", + + "compute:get": "", + "compute:get_all": "", + "compute:get_all_tenants": "", + + "compute:update": "", + + "compute:get_instance_metadata": "", + "compute:get_all_instance_metadata": "", + "compute:get_all_instance_system_metadata": "", + "compute:update_instance_metadata": "", + "compute:delete_instance_metadata": "", + + "compute:get_instance_faults": "", + "compute:get_diagnostics": "", + + "compute:get_lock": "", + "compute:lock": "", + "compute:unlock": "", + "compute:unlock_override": "is_admin:True", + + "compute:get_vnc_console": "", + "compute:get_spice_console": "", + "compute:get_rdp_console": "", + "compute:get_console_output": "", + + "compute:associate_floating_ip": "", + "compute:reset_network": "", + "compute:inject_network_info": "", + "compute:add_fixed_ip": "", + "compute:remove_fixed_ip": "", + + "compute:attach_volume": "", + "compute:detach_volume": "", + + "compute:set_admin_password": "", + + "compute:rescue": "", + "compute:unrescue": "", + + "compute:suspend": "", + "compute:resume": "", + + "compute:pause": "", + "compute:unpause": "", + + "compute:start": "", + "compute:stop": "", + + "compute:resize": "", + "compute:confirm_resize": "", + "compute:revert_resize": "", + + "compute:rebuild": "", + + "compute:reboot": "", + + "compute:snapshot": "", + "compute:backup": "", + + "compute:shelve": "", + "compute:shelve_offload": "", + "compute:unshelve": "", + + "compute:security_groups:add_to_instance": "", + "compute:security_groups:remove_from_instance": "", + + "compute:delete": "", + "compute:soft_delete": "", + "compute:force_delete": "", + "compute:restore": "", + "compute:swap_volume": "", + + "compute:volume_snapshot_create": "", + "compute:volume_snapshot_delete": "", + + "compute:v3:servers:start": "", + "compute:v3:servers:stop": "", + "compute_extension:v3:os-access-ips": "", + "compute_extension:accounts": "", + "compute_extension:admin_actions:pause": "", + "compute_extension:admin_actions:unpause": "", + "compute_extension:admin_actions:suspend": "", + "compute_extension:admin_actions:resume": "", + "compute_extension:admin_actions:lock": "", + "compute_extension:admin_actions:unlock": "", + "compute_extension:admin_actions:resetNetwork": "", + "compute_extension:admin_actions:injectNetworkInfo": "", + "compute_extension:admin_actions:createBackup": "", + "compute_extension:admin_actions:migrateLive": "", 
+ "compute_extension:admin_actions:resetState": "", + "compute_extension:admin_actions:migrate": "", + "compute_extension:v3:os-admin-actions:reset_network": "", + "compute_extension:v3:os-admin-actions:inject_network_info": "", + "compute_extension:v3:os-admin-actions:reset_state": "", + "compute_extension:v3:os-admin-password": "", + "compute_extension:aggregates": "rule:admin_api", + "compute_extension:v3:os-aggregates:index": "rule:admin_api", + "compute_extension:v3:os-aggregates:create": "rule:admin_api", + "compute_extension:v3:os-aggregates:show": "rule:admin_api", + "compute_extension:v3:os-aggregates:update": "rule:admin_api", + "compute_extension:v3:os-aggregates:delete": "rule:admin_api", + "compute_extension:v3:os-aggregates:add_host": "rule:admin_api", + "compute_extension:v3:os-aggregates:remove_host": "rule:admin_api", + "compute_extension:v3:os-aggregates:set_metadata": "rule:admin_api", + "compute_extension:agents": "", + "compute_extension:v3:os-agents": "", + "compute_extension:attach_interfaces": "", + "compute_extension:v3:os-attach-interfaces": "", + "compute_extension:baremetal_nodes": "", + "compute_extension:cells": "", + "compute_extension:v3:os-cells": "", + "compute_extension:certificates": "", + "compute_extension:v3:os-certificates:create": "", + "compute_extension:v3:os-certificates:show": "", + "compute_extension:cloudpipe": "", + "compute_extension:cloudpipe_update": "", + "compute_extension:config_drive": "", + "compute_extension:v3:os-config-drive": "", + "compute_extension:console_output": "", + "compute_extension:v3:os-console-output": "", + "compute_extension:consoles": "", + "compute_extension:v3:os-remote-consoles": "", + "compute_extension:createserverext": "", + "compute_extension:v3:os-create-backup": "", + "compute_extension:deferred_delete": "", + "compute_extension:v3:os-deferred-delete": "", + "compute_extension:disk_config": "", + "compute_extension:evacuate": "is_admin:True", + "compute_extension:v3:os-evacuate": "is_admin:True", + "compute_extension:extended_server_attributes": "", + "compute_extension:v3:os-extended-server-attributes": "", + "compute_extension:extended_status": "", + "compute_extension:v3:os-extended-status": "", + "compute_extension:extended_availability_zone": "", + "compute_extension:v3:os-extended-availability-zone": "", + "compute_extension:extended_ips": "", + "compute_extension:extended_ips_mac": "", + "compute_extension:extended_vif_net": "", + "compute_extension:extended_volumes": "", + "compute_extension:v3:os-extended-volumes": "", + "compute_extension:v3:os-extended-volumes:swap": "", + "compute_extension:v3:os-extended-volumes:attach": "", + "compute_extension:v3:os-extended-volumes:detach": "", + "compute_extension:v3:extensions:discoverable": "", + "compute_extension:fixed_ips": "", + "compute_extension:flavor_access": "", + "compute_extension:flavor_access:addTenantAccess": "rule:admin_api", + "compute_extension:flavor_access:removeTenantAccess": "rule:admin_api", + "compute_extension:v3:flavor-access": "", + "compute_extension:v3:flavor-access:remove_tenant_access": + "rule:admin_api", + "compute_extension:v3:flavor-access:add_tenant_access": + "rule:admin_api", + "compute_extension:flavor_disabled": "", + "compute_extension:v3:os-flavor-disabled": "", + "compute_extension:flavor_rxtx": "", + "compute_extension:v3:os-flavor-rxtx": "", + "compute_extension:flavor_swap": "", + "compute_extension:flavorextradata": "", + "compute_extension:flavorextraspecs:index": "", + 
"compute_extension:flavorextraspecs:show": "", + "compute_extension:flavorextraspecs:create": "is_admin:True", + "compute_extension:flavorextraspecs:update": "is_admin:True", + "compute_extension:flavorextraspecs:delete": "is_admin:True", + "compute_extension:v3:flavor-extra-specs:index": "", + "compute_extension:v3:flavor-extra-specs:show": "", + "compute_extension:v3:flavor-extra-specs:create": "is_admin:True", + "compute_extension:v3:flavor-extra-specs:update": "is_admin:True", + "compute_extension:v3:flavor-extra-specs:delete": "is_admin:True", + "compute_extension:flavormanage": "", + "compute_extension:v3:flavor-manage": "", + "compute_extension:v3:flavors:discoverable": "", + "compute_extension:floating_ip_dns": "", + "compute_extension:floating_ip_pools": "", + "compute_extension:floating_ips": "", + "compute_extension:floating_ips_bulk": "", + "compute_extension:fping": "", + "compute_extension:fping:all_tenants": "is_admin:True", + "compute_extension:hide_server_addresses": "", + "compute_extension:v3:os-hide-server-addresses": "", + "compute_extension:hosts": "", + "compute_extension:v3:os-hosts": "rule:admin_api", + "compute_extension:hypervisors": "", + "compute_extension:v3:os-hypervisors": "rule:admin_api", + "compute_extension:image_size": "", + "compute_extension:instance_actions": "", + "compute_extension:v3:os-instance-actions": "", + "compute_extension:instance_actions:events": "is_admin:True", + "compute_extension:v3:os-instance-actions:events": "is_admin:True", + "compute_extension:instance_usage_audit_log": "", + "compute_extension:keypairs": "", + "compute_extension:keypairs:index": "", + "compute_extension:keypairs:show": "", + "compute_extension:keypairs:create": "", + "compute_extension:keypairs:delete": "", + + "compute_extension:v3:keypairs": "", + "compute_extension:v3:keypairs:index": "", + "compute_extension:v3:keypairs:show": "", + "compute_extension:v3:keypairs:create": "", + "compute_extension:v3:keypairs:delete": "", + "compute_extension:v3:os-lock-server:lock": "", + "compute_extension:v3:os-lock-server:unlock": "", + "compute_extension:v3:os-migrate-server:migrate": "", + "compute_extension:v3:os-migrate-server:migrate_live": "", + "compute_extension:multinic": "", + "compute_extension:v3:os-multinic": "", + "compute_extension:networks": "", + "compute_extension:networks:view": "", + "compute_extension:networks_associate": "", + "compute_extension:os-tenant-networks": "", + "compute_extension:v3:os-pause-server:pause": "", + "compute_extension:v3:os-pause-server:unpause": "", + "compute_extension:v3:os-pci:pci_servers": "", + "compute_extension:v3:os-pci:index": "", + "compute_extension:v3:os-pci:detail": "", + "compute_extension:v3:os-pci:show": "", + "compute_extension:quotas:show": "", + "compute_extension:quotas:update": "", + "compute_extension:quotas:delete": "", + "compute_extension:v3:os-quota-sets:show": "", + "compute_extension:v3:os-quota-sets:update": "", + "compute_extension:v3:os-quota-sets:delete": "", + "compute_extension:v3:os-quota-sets:detail": "", + "compute_extension:rescue": "", + "compute_extension:v3:os-rescue": "", + "compute_extension:security_group_default_rules": "", + "compute_extension:security_groups": "", + "compute_extension:v3:os-security-groups": "", + "compute_extension:server_diagnostics": "", + "compute_extension:v3:os-server-diagnostics": "", + "compute_extension:server_groups": "", + "compute_extension:server_password": "", + "compute_extension:v3:os-server-password": "", + "compute_extension:server_usage": "", + 
"compute_extension:v3:os-server-usage": "", + "compute_extension:services": "", + "compute_extension:v3:os-services": "", + "compute_extension:shelve": "", + "compute_extension:shelveOffload": "", + "compute_extension:v3:os-shelve:shelve": "", + "compute_extension:v3:os-shelve:shelve_offload": "", + "compute_extension:simple_tenant_usage:show": "", + "compute_extension:simple_tenant_usage:list": "", + "compute_extension:unshelve": "", + "compute_extension:v3:os-shelve:unshelve": "", + "compute_extension:v3:os-suspend-server:suspend": "", + "compute_extension:v3:os-suspend-server:resume": "", + "compute_extension:users": "", + "compute_extension:virtual_interfaces": "", + "compute_extension:virtual_storage_arrays": "", + "compute_extension:volumes": "", + "compute_extension:volume_attachments:index": "", + "compute_extension:volume_attachments:show": "", + "compute_extension:volume_attachments:create": "", + "compute_extension:volume_attachments:update": "", + "compute_extension:volume_attachments:delete": "", + "compute_extension:volumetypes": "", + "compute_extension:zones": "", + "compute_extension:availability_zone:list": "", + "compute_extension:v3:os-availability-zone:list": "", + "compute_extension:availability_zone:detail": "is_admin:True", + "compute_extension:v3:os-availability-zone:detail": "is_admin:True", + "compute_extension:used_limits_for_admin": "is_admin:True", + "compute_extension:migrations:index": "is_admin:True", + "compute_extension:v3:os-migrations:index": "is_admin:True", + "compute_extension:os-assisted-volume-snapshots:create": "", + "compute_extension:os-assisted-volume-snapshots:delete": "", + "compute_extension:console_auth_tokens": "is_admin:True", + "compute_extension:v3:os-console-auth-tokens": "is_admin:True", + "compute_extension:os-server-external-events:create": "rule:admin_api", + "compute_extension:v3:os-server-external-events:create": "rule:admin_api", + + "volume:create": "", + "volume:get": "", + "volume:get_all": "", + "volume:get_volume_metadata": "", + "volume:delete": "", + "volume:update": "", + "volume:delete_volume_metadata": "", + "volume:update_volume_metadata": "", + "volume:attach": "", + "volume:detach": "", + "volume:reserve_volume": "", + "volume:unreserve_volume": "", + "volume:begin_detaching": "", + "volume:roll_detaching": "", + "volume:check_attach": "", + "volume:check_detach": "", + "volume:initialize_connection": "", + "volume:terminate_connection": "", + "volume:create_snapshot": "", + "volume:delete_snapshot": "", + "volume:get_snapshot": "", + "volume:get_all_snapshots": "", + + + "volume_extension:volume_admin_actions:reset_status": "rule:admin_api", + "volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api", + "volume_extension:volume_admin_actions:force_delete": "rule:admin_api", + "volume_extension:volume_actions:upload_image": "", + "volume_extension:types_manage": "", + "volume_extension:types_extra_specs": "", + + + "network:get_all": "", + "network:get": "", + "network:create": "", + "network:delete": "", + "network:associate": "", + "network:disassociate": "", + "network:get_vifs_by_instance": "", + "network:get_vif_by_mac_address": "", + "network:allocate_for_instance": "", + "network:deallocate_for_instance": "", + "network:validate_networks": "", + "network:get_instance_uuids_by_ip_filter": "", + "network:get_instance_id_by_floating_address": "", + "network:setup_networks_on_host": "", + + "network:get_floating_ip": "", + "network:get_floating_ip_pools": "", + "network:get_floating_ip_by_address": 
"", + "network:get_floating_ips_by_project": "", + "network:get_floating_ips_by_fixed_address": "", + "network:allocate_floating_ip": "", + "network:deallocate_floating_ip": "", + "network:associate_floating_ip": "", + "network:disassociate_floating_ip": "", + "network:release_floating_ip": "", + "network:migrate_instance_start": "", + "network:migrate_instance_finish": "", + + "network:get_fixed_ip": "", + "network:get_fixed_ip_by_address": "", + "network:add_fixed_ip_to_instance": "", + "network:remove_fixed_ip_from_instance": "", + "network:add_network_to_project": "", + "network:get_instance_nw_info": "", + + "network:get_dns_domains": "", + "network:add_dns_entry": "", + "network:modify_dns_entry": "", + "network:delete_dns_entry": "", + "network:get_dns_entries_by_address": "", + "network:get_dns_entries_by_name": "", + "network:create_private_dns_domain": "", + "network:create_public_dns_domain": "", + "network:delete_dns_domain": "" +} +""" diff --git a/rack/tests/policy_fixture.py b/rack/tests/policy_fixture.py new file mode 100644 index 0000000..8c285e3 --- /dev/null +++ b/rack/tests/policy_fixture.py @@ -0,0 +1,73 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import os + +import fixtures +from oslo.config import cfg + +from rack.openstack.common import policy as common_policy +import rack.policy +from rack.tests import fake_policy + +CONF = cfg.CONF + + +class PolicyFixture(fixtures.Fixture): + + def setUp(self): + super(PolicyFixture, self).setUp() + self.policy_dir = self.useFixture(fixtures.TempDir()) + self.policy_file_name = os.path.join(self.policy_dir.path, + 'policy.json') + with open(self.policy_file_name, 'w') as policy_file: + policy_file.write(fake_policy.policy_data) + CONF.set_override('policy_file', self.policy_file_name) + rack.policy.reset() + rack.policy.init() + self.addCleanup(rack.policy.reset) + + def set_rules(self, rules): + common_policy.set_rules(common_policy.Rules( + dict((k, common_policy.parse_rule(v)) + for k, v in rules.items()))) + + +class RoleBasedPolicyFixture(fixtures.Fixture): + + def __init__(self, role="admin", *args, **kwargs): + super(RoleBasedPolicyFixture, self).__init__(*args, **kwargs) + self.role = role + + def setUp(self): + """Copy live policy.json file and convert all actions to + allow users of the specified role only + """ + super(RoleBasedPolicyFixture, self).setUp() + policy = json.load(open(CONF.policy_file)) + + # Convert all actions to require specified role + for action, rule in policy.iteritems(): + policy[action] = 'role:%s' % self.role + + self.policy_dir = self.useFixture(fixtures.TempDir()) + self.policy_file_name = os.path.join(self.policy_dir.path, + 'policy.json') + with open(self.policy_file_name, 'w') as policy_file: + json.dump(policy, policy_file) + CONF.set_override('policy_file', self.policy_file_name) + rack.policy.reset() + rack.policy.init() + self.addCleanup(rack.policy.reset) diff --git a/rack/tests/resourceoperator/__init__.py 
b/rack/tests/resourceoperator/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/rack/tests/resourceoperator/openstack/__init__.py b/rack/tests/resourceoperator/openstack/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/rack/tests/resourceoperator/openstack/test_keypairs.py b/rack/tests/resourceoperator/openstack/test_keypairs.py new file mode 100644 index 0000000..f665b03 --- /dev/null +++ b/rack/tests/resourceoperator/openstack/test_keypairs.py @@ -0,0 +1,85 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Unit Tests for rack.resourceoperator.openstack.keypairs +""" + +from oslo.config import cfg + +from rack import exception +from rack.resourceoperator import openstack as os_client +from rack.resourceoperator.openstack import keypairs +from rack import test + +CONF = cfg.CONF + +CREDENTIALS = { + "os_username": "fake", + "os_password": "fake", + "os_tenant_name": "fake", + "os_auth_url": "fake" +} +cfg.set_defaults(os_client.openstack_client_opts, **CREDENTIALS) + + +class FakeKeypairModel(object): + name = "fake_keypair" + private_key = "fake_private_key" + + +class KeypairTestCase(test.NoDBTestCase): + def setUp(self): + super(KeypairTestCase, self).setUp() + self.keypair_client = keypairs.KeypairAPI() + self.nova = os_client.get_nova_client() + self.mox.StubOutWithMock(self.nova, "keypairs") + self.mox.StubOutWithMock(os_client, "get_nova_client") + os_client.get_nova_client().AndReturn(self.nova) + + def test_keypair_create(self): + name = "fake_keypair" + self.nova.keypairs.create(name).AndReturn(FakeKeypairModel()) + self.mox.ReplayAll() + + expected = { + "nova_keypair_id": name, + "private_key": "fake_private_key" + } + values = self.keypair_client.keypair_create(name) + self.assertEquals(expected, values) + + def test_keypair_create_raise_exception(self): + name = "fake_keypair" + self.nova.keypairs.create(name).AndRaise(Exception()) + self.mox.ReplayAll() + + self.assertRaises( + exception.KeypairCreateFailed, + self.keypair_client.keypair_create, name) + + def test_keypair_delete(self): + nova_keypair_id = "fake_keypair" + self.nova.keypairs.delete(nova_keypair_id) + self.mox.ReplayAll() + + self.keypair_client.keypair_delete(nova_keypair_id) + + def test_keypair_delete_raise_exception(self): + nova_keypair_id = "fake_keypair" + self.nova.keypairs.delete(nova_keypair_id).AndRaise(Exception()) + self.mox.ReplayAll() + + self.assertRaises( + exception.KeypairDeleteFailed, + self.keypair_client.keypair_delete, nova_keypair_id) diff --git a/rack/tests/resourceoperator/openstack/test_networks.py b/rack/tests/resourceoperator/openstack/test_networks.py new file mode 100644 index 0000000..7c51c78 --- /dev/null +++ b/rack/tests/resourceoperator/openstack/test_networks.py @@ -0,0 +1,109 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Unit Tests for rack.resourceoperator.openstack.networks +""" + +import mox + +from neutronclient.v2_0 import client as neutron_client +from oslo.config import cfg + +from rack import exception +from rack.resourceoperator import openstack as os_client +from rack.resourceoperator.openstack import networks +from rack import test + +CONF = cfg.CONF + +CREDENTIALS = { + "os_username": "fake", + "os_password": "fake", + "os_tenant_name": "fake", + "os_auth_url": "fake" +} +cfg.set_defaults(os_client.openstack_client_opts, **CREDENTIALS) + + +CIDR = "10.0.0.0/24" + + +class NetworkTestCase(test.NoDBTestCase): + def setUp(self): + super(NetworkTestCase, self).setUp() + self.network_client = networks.NetworkAPI() + self.neutron_mock = self.mox.CreateMock(neutron_client.Client) + self.mox.StubOutWithMock(os_client, "get_neutron_client") + os_client.get_neutron_client().AndReturn(self.neutron_mock) + + def _setup_test_network_create(self, gateway=None, dns_nameservers=None, ext_router=None): + network_id = "fake_network_id" + create_network_response = {"network": {"id": "fake_network_id"}} + self.neutron_mock.create_network(mox.IsA(dict)).AndReturn(create_network_response) + expected_body = { + "subnet":{ + "network_id": network_id, + "ip_version": 4, + "cidr": CIDR + } + } + if gateway: + expected_body["subnet"].update(gateway_ip=gateway) + if dns_nameservers: + expected_body["subnet"].update(dns_nameservers=dns_nameservers) + create_subnet_response = {"subnet": {"id": "fake_subnet_id"}} + self.neutron_mock.create_subnet(expected_body).AndReturn(create_subnet_response) + if ext_router: + self.neutron_mock.add_interface_router(mox.IsA(str), mox.IsA(dict)) + + def test_network_create(self): + self._setup_test_network_create() + self.mox.ReplayAll() + + self.network_client.network_create("fake_name", CIDR) + + def test_network_create_with_parameters(self): + parameters = { + "gateway": "10.0.0.254", + "dns_nameservers": ["8.8.8.8", "8.8.4.4"], + "ext_router": "fake_router" + } + self._setup_test_network_create(**parameters) + self.mox.ReplayAll() + + self.network_client.network_create("fake_name", CIDR, **parameters) + + def test_network_raise_exception(self): + self.neutron_mock.create_network(mox.IsA(dict)).AndRaise(Exception()) + self.mox.ReplayAll() + + self.assertRaises(exception.NetworkCreateFailed, self.network_client.network_create, "fake_name", CIDR) + + def test_network_delete(self): + neutron_network_id = "fake_network_id" + ext_router = "fake_router" + show_network_response = {"network": {"subnets":["fake_subnet1", "fake_subnet2"]}} + self.neutron_mock.show_network(neutron_network_id).AndReturn(show_network_response) + self.neutron_mock.remove_interface_router(ext_router, mox.IsA(dict)).MultipleTimes() + self.neutron_mock.delete_network(neutron_network_id) + self.mox.ReplayAll() + + self.network_client.network_delete(neutron_network_id, ext_router) + + def test_network_delete_raise_exception(self): + neutron_network_id = "fake_network_id" + self.neutron_mock.delete_network(neutron_network_id).AndRaise(Exception()) + self.mox.ReplayAll() + + 
self.assertRaises(exception.NetworkDeleteFailed, self.network_client.network_delete, neutron_network_id) diff --git a/rack/tests/resourceoperator/openstack/test_processes.py b/rack/tests/resourceoperator/openstack/test_processes.py new file mode 100644 index 0000000..bd6a569 --- /dev/null +++ b/rack/tests/resourceoperator/openstack/test_processes.py @@ -0,0 +1,125 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from rack import test, exception +from rack.resourceoperator import openstack as os_client +import uuid + +from rack.resourceoperator.openstack import processes + + +class fake_process(object): + pass + +class ProcessesTest(test.NoDBTestCase): + def setUp(self): + super(ProcessesTest, self).setUp() + self.process_client = processes.ProcessesAPI() + self.nova = os_client.get_nova_client() + self.mox.StubOutWithMock(self.nova, "servers") + self.mox.StubOutWithMock(os_client, "get_nova_client") + os_client.get_nova_client().AndReturn(self.nova) + self.process_id = unicode(uuid.uuid4()) + + def test_process_create(self): + display_name = "display_name" + glance_image_id = "5aea309f-9638-44de-827d-5125ff7e4689" + nova_flavor_id = "3" + nova_keypair_id = "test" + neutron_securitygroup_ids = ["947dc616-e737-4cb9-b816-52ad80cb9e37", "1892987f-3874-46ef-a487-fb8e925210ce"] + neutron_network_ids = ["a3c6488a-a236-46f7-aab6-8f1fe91ad9ef","43015163-babe-4bee-8fe8-38470d28b2a2"] + metadata = {"metadata": "metadata"} + process_build = fake_process() + process_build.status = "BUILD" + process_build.id = self.process_id + nics = [] + for network_id in neutron_network_ids: + nics.append({"net-id": network_id}) + + self.nova.servers.create(name=display_name, + image=glance_image_id, + flavor=nova_flavor_id, + meta=metadata, + nics=nics, + key_name=nova_keypair_id, + security_groups=neutron_securitygroup_ids)\ + .AndReturn(process_build) + process_active = fake_process() + process_active.status = "ACTIVE" + process_active.id = self.process_id + self.nova.servers.get(self.process_id)\ + .AndReturn(process_active) + self.mox.ReplayAll() + + process_id = self.process_client.process_create(display_name, + glance_image_id, + nova_flavor_id, + nova_keypair_id, + neutron_securitygroup_ids, + neutron_network_ids, + metadata) + self.assertEqual(process_id, self.process_id) + + def test_process_create_raise_exception(self): + display_name = "display_name" + glance_image_id = "5aea309f-9638-44de-827d-5125ff7e4689" + nova_flavor_id = "3" + nova_keypair_id = "test" + neutron_securitygroup_ids = ["947dc616-e737-4cb9-b816-52ad80cb9e37", "1892987f-3874-46ef-a487-fb8e925210ce"] + neutron_network_ids = ["a3c6488a-a236-46f7-aab6-8f1fe91ad9ef","43015163-babe-4bee-8fe8-38470d28b2a2"] + metadata = {"metadata": "metadata"} + process_build = fake_process() + process_build.status = "BUILD" + process_build.id = self.process_id + nics = [] + for network_id in neutron_network_ids: + nics.append({"net-id": network_id}) + + self.nova.servers.create(name=display_name, + 
image=glance_image_id, + flavor=nova_flavor_id, + meta=metadata, + nics=nics, + key_name=nova_keypair_id, + security_groups=neutron_securitygroup_ids)\ + .AndReturn(process_build) + process_active = fake_process() + process_active.status = "ERROR" + process_active.id = self.process_id + self.nova.servers.get(self.process_id)\ + .AndReturn(process_active) + self.mox.ReplayAll() + self.assertRaises( + exception.ProcessCreateFailed, + self.process_client.process_create, + display_name, + glance_image_id, + nova_flavor_id, + nova_keypair_id, + neutron_securitygroup_ids, + neutron_network_ids, + metadata) + + def test_process_delete(self): + self.nova.servers.delete(self.process_id) + self.mox.ReplayAll() + + self.process_client.process_delete(self.process_id) + + def test_process_delete_raise_exception(self): + self.nova.servers.delete(self.process_id).AndRaise(Exception) + self.mox.ReplayAll() + + self.assertRaises(exception.ProcessDeleteFailed, + self.process_client.process_delete, + self.process_id) diff --git a/rack/tests/resourceoperator/openstack/test_securitygroups.py b/rack/tests/resourceoperator/openstack/test_securitygroups.py new file mode 100644 index 0000000..8c33de9 --- /dev/null +++ b/rack/tests/resourceoperator/openstack/test_securitygroups.py @@ -0,0 +1,164 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Unit Tests for rack.resourceoperator.openstack.securitygroups +""" +from oslo.config import cfg + +from rack import exception +from rack.resourceoperator import openstack as os_client +from rack.resourceoperator.openstack import securitygroups +from rack import test + +import mox + +CONF = cfg.CONF + +CREDENTIALS = { + "os_username": "fake", + "os_password": "fake", + "os_tenant_name": "fake", + "os_auth_url": "fake" +} +cfg.set_defaults(os_client.openstack_client_opts, **CREDENTIALS) + + +def fake_securitygroup(): + return {"security_group":{"id":"neutron_securitygroup_id"}} + + +def fake_securitygrouprule(): + return {"direction": "ingress", + "ethertype": "IPv4", + "security_group_id": "neutron_securitygroup_id", + "protocol": "tcp", + "port_range_min": None, + "port_range_max": "80" + } + + +class SecuritygroupTestCase(test.NoDBTestCase): + def setUp(self): + super(SecuritygroupTestCase, self).setUp() + self.securitygroup_client = securitygroups.SecuritygroupAPI() + self.neutron = os_client.get_neutron_client() + self.mox.StubOutWithMock(self.neutron, "create_security_group") + self.mox.StubOutWithMock(self.neutron, "delete_security_group") + self.mox.StubOutWithMock(os_client, "get_neutron_client") + os_client.get_neutron_client().AndReturn(self.neutron) + + def test_securitygroup_create(self): + name = "securitygroup" + self.neutron.create_security_group({"security_group": {"name": name}}).AndReturn(fake_securitygroup()) + self.mox.ReplayAll() + + expected = "neutron_securitygroup_id" + + values = self.securitygroup_client.securitygroup_create(name) + self.assertEquals(expected, values) + + def test_securitygroup_create_raise_exception(self): + name = "securitygroup" + self.neutron.create_security_group({"security_group": {"name": name}}).AndRaise(Exception()) + self.mox.ReplayAll() + + self.assertRaises( + exception.SecuritygroupCreateFailed, + self.securitygroup_client.securitygroup_create, name) + + def test_securitygroup_delete(self): + neutron_securitygroup_id = "fake_securitygroup" + self.neutron.delete_security_group(neutron_securitygroup_id) + self.mox.ReplayAll() + + self.assertIsNone(self.securitygroup_client.securitygroup_delete(neutron_securitygroup_id)) + + def test_securitygroup_delete_raise_exception(self): + neutron_securitygroup_id = "fake_securitygroup" + self.neutron.delete_security_group(neutron_securitygroup_id).AndRaise(Exception()) + self.mox.ReplayAll() + + self.assertRaises( + exception.SecuritygroupDeleteFailed, + self.securitygroup_client.securitygroup_delete, neutron_securitygroup_id) + + +class SecuritygroupruleTestCase(test.NoDBTestCase): + def setUp(self): + super(SecuritygroupruleTestCase, self).setUp() + self.securitygrouprule_client = securitygroups.SecuritygroupruleAPI() + self.neutron = os_client.get_neutron_client() + self.mox.StubOutWithMock(self.neutron, "create_security_group_rule") + self.mox.StubOutWithMock(os_client, "get_neutron_client") + os_client.get_neutron_client().AndReturn(self.neutron) + + def test_securitygrouprule_create_remote_ip_prefix(self): + rule = fake_securitygrouprule() + self.neutron.create_security_group_rule({"security_group_rule": + {"direction": rule["direction"], + "ethertype": rule["ethertype"], + "security_group_id": rule["security_group_id"], + "protocol": rule["protocol"], + "port_range_min": rule["port_range_max"], + "port_range_max": rule["port_range_max"], + "remote_ip_prefix": "192.168.1.1/32" + }}) + self.mox.ReplayAll() + + self.assertIsNone( + self.securitygrouprule_client.securitygrouprule_create( + 
rule["security_group_id"], + rule["protocol"], + port_range_min=rule["port_range_min"], + port_range_max=rule["port_range_max"], + remote_ip_prefix="192.168.1.1/32", + )) + + def test_securitygrouprule_create_remote_group_id(self): + rule = fake_securitygrouprule() + self.neutron.create_security_group_rule({"security_group_rule": + {"direction": rule["direction"], + "ethertype": rule["ethertype"], + "security_group_id": rule["security_group_id"], + "protocol": rule["protocol"], + "port_range_min": rule["port_range_max"], + "port_range_max": rule["port_range_max"], + "remote_group_id": "remote_neutron_securitygroup_id" + }}) + self.mox.ReplayAll() + + self.assertIsNone( + self.securitygrouprule_client.securitygrouprule_create( + rule["security_group_id"], + rule["protocol"], + port_range_min=rule["port_range_min"], + port_range_max=rule["port_range_max"], + remote_neutron_securitygroup_id="remote_neutron_securitygroup_id", + )) + + def test_securitygrouprule_create_raise_exception(self): + rule = fake_securitygrouprule() + self.neutron.create_security_group_rule(mox.IgnoreArg()).\ + AndRaise(Exception()) + self.mox.ReplayAll() + + self.assertRaises( + exception.SecuritygroupCreateFailed, + self.securitygrouprule_client.securitygrouprule_create, + rule["security_group_id"], + rule["protocol"], + port_range_min=rule["port_range_min"], + port_range_max=rule["port_range_max"], + remote_neutron_securitygroup_id="remote_neutron_securitygroup_id" + ) diff --git a/rack/tests/resourceoperator/test_manager.py b/rack/tests/resourceoperator/test_manager.py new file mode 100644 index 0000000..f0b1fa0 --- /dev/null +++ b/rack/tests/resourceoperator/test_manager.py @@ -0,0 +1,391 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Unit Tests for rack.resourceoperator.manager +""" + +import mox +from oslo.config import cfg +import uuid + +from rack import context +from rack import db +from rack import exception +from rack.resourceoperator import manager as operator_manager +from rack import test +from __builtin__ import Exception + +CONF = cfg.CONF + +GID = unicode(uuid.uuid4()) +KEYPAIR_ID = unicode(uuid.uuid4()) +NETWORK_ID = unicode(uuid.uuid4()) +NEUTRON_NETWORK_ID = unicode(uuid.uuid4()) + +NOVA_INSTANCE_ID = unicode(uuid.uuid4()) + + + +def fake_keypair_create(name): + return { + "nova_keypair_id": name, + "name": name + } + +def fake_securitygroup_create(name): + return "neutron_securitygroup_id" + + +def fake_securitygrouprule_create(neutron_securitygroup_id, protocol, + port_range_min=None, port_range_max=None, + remote_neutron_securitygroup_id=None, remote_ip_prefix=None, + direction="ingress", ethertype="IPv4"): + pass + + +def fake_network_create(): + return NEUTRON_NETWORK_ID + + +class ResourceOperatorManagerKeypairTestCase(test.NoDBTestCase): + def setUp(self): + super(ResourceOperatorManagerKeypairTestCase, self).setUp() + self.manager = operator_manager.ResourceOperatorManager() + self.context = context.RequestContext('fake_user', 'fake_project') + + def test_keypair_create(self): + self.stubs.Set(self.manager.keypair_client, "keypair_create", fake_keypair_create) + self.mox.StubOutWithMock(db, "keypair_update") + db.keypair_update(self.context, GID, KEYPAIR_ID, mox.IsA(dict)) + self.mox.ReplayAll() + + self.manager.keypair_create(self.context, GID, KEYPAIR_ID, "fake_keypair") + + def test_keypair_create_raise_keypair_create_failed(self): + self.mox.StubOutWithMock(self.manager.keypair_client, "keypair_create") + self.manager.keypair_client.keypair_create(mox.IsA(str))\ + .AndRaise(exception.KeypairCreateFailed()) + self.mox.StubOutWithMock(db, "keypair_update") + db.keypair_update(self.context, GID, KEYPAIR_ID, {"status": "ERROR"}) + self.mox.ReplayAll() + + self.manager.keypair_create(self.context, GID, KEYPAIR_ID, "fake_keypair") + + def test_keypair_create_keypair_not_found(self): + self.stubs.Set(self.manager.keypair_client, "keypair_create", fake_keypair_create) + self.mox.StubOutWithMock(db, "keypair_update") + db.keypair_update(self.context, GID, KEYPAIR_ID, mox.IsA(dict))\ + .AndRaise(exception.KeypairNotFound(keypair_id=KEYPAIR_ID)) + self.mox.ReplayAll() + + self.manager.keypair_create(self.context, GID, KEYPAIR_ID, "fake_keypair") + + def test_keypair_delete(self): + self.mox.StubOutWithMock(self.manager.keypair_client, "keypair_delete") + self.manager.keypair_client.keypair_delete(mox.IsA(str)) + self.mox.ReplayAll() + + self.manager.keypair_delete(self.context, "fake_keypair") + + def test_keypair_delete_raise_keypair_delete_failed(self): + self.mox.StubOutWithMock(self.manager.keypair_client, "keypair_delete") + self.manager.keypair_client.keypair_delete(mox.IsA(str))\ + .AndRaise(exception.KeypairDeleteFailed()) + self.mox.ReplayAll() + + self.manager.keypair_delete(self.context, "fake_keypair") + + +class ResourceOperatorManagerNetworkTestCase(test.NoDBTestCase): + def setUp(self): + super(ResourceOperatorManagerNetworkTestCase, self).setUp() + self.manager = operator_manager.ResourceOperatorManager() + self.context = context.RequestContext('fake_user', 'fake_project') + + def test_create_network(self): + self.stubs.Set(self.manager.network_client, "network_create", fake_network_create) + self.mox.StubOutWithMock(db, "network_update") + expected_values = {"neutron_network_id": 
NEUTRON_NETWORK_ID, + "status": "ACTIVE"} + db.network_update(self.context, NETWORK_ID, expected_values) + self.mox.ReplayAll() + + network = {} + network["network_id"] = NETWORK_ID + self.manager.network_create(self.context, network) + + def test_create_network_exception_create_faild(self): + self.mox.StubOutWithMock(self.manager.network_client, "network_create") + self.manager.network_client.network_create( + mox.IsA(str), mox.IsA(str), mox.IsA(str), mox.IsA(str), mox.IsA(str),)\ + .AndRaise(Exception()) + self.mox.StubOutWithMock(db, "network_update") + expected_values = {"status": "ERROR"} + db.network_update(self.context, NETWORK_ID, expected_values) + self.mox.ReplayAll() + + network = { + "network_id": NETWORK_ID, + "display_name": "fake_name", + "subnet": "10.0.0.0/24", + "gateway": "fake_gateway", + "dns_nameservers": "fake_dns_nameservers", + "ext_router": "fake_router"} + self.manager.network_create(self.context, network) + + def test_create_network_exception_db_update_faild(self): + self.stubs.Set(self.manager.network_client, "network_create", fake_network_create) + self.mox.StubOutWithMock(db, "network_update") + expected_values = {"neutron_network_id": NEUTRON_NETWORK_ID, + "status": "ACTIVE"} + db.network_update(self.context, NETWORK_ID, expected_values)\ + .AndRaise(Exception()) + self.mox.ReplayAll() + + network = {} + network["network_id"] = NETWORK_ID + self.manager.network_create(self.context, network) + + def test_delete_network(self): + ext_router = "fake_ext_router" + self.mox.StubOutWithMock(self.manager.network_client, "network_delete") + self.manager.network_client.network_delete(NEUTRON_NETWORK_ID, ext_router) + self.mox.ReplayAll() + + self.manager.network_delete(self.context, NEUTRON_NETWORK_ID, ext_router) + + def test_delete_network_exception_delete_faild(self): + ext_router = "fake_ext_router" + self.mox.StubOutWithMock(self.manager.network_client, "network_delete") + self.manager.network_client.network_delete(NEUTRON_NETWORK_ID, ext_router)\ + .AndRaise(Exception()) + self.mox.ReplayAll() + + self.manager.network_delete(self.context, NEUTRON_NETWORK_ID, ext_router) + + +class ResourceOperatorManagerSecuritygroupTestCase(test.NoDBTestCase): + def setUp(self): + super(ResourceOperatorManagerSecuritygroupTestCase, self).setUp() + self.manager = operator_manager.ResourceOperatorManager() + self.context = context.RequestContext('fake_user', 'fake_project') + self.securitygroup_id = "securitygroup_id" + self.name = "securitygroup_name" + securitygrouprule = {} + securitygrouprule["protocol"] = "" + + def _securitygroups(self): + return [ + { + "protocol": "tcp", + "port_range_min": None, + "port_range_max": "80", + "remote_neutron_securitygroup_id": None, + "remote_ip_prefix": "192.168.1.1/32", + }, + { + "protocol": "tcp", + "port_range_min": "1", + "port_range_max": "80", + "remote_neutron_securitygroup_id": "fake", + "remote_ip_prefix": None, + }, + ] + + + def test_securitygroup_create(self): + self.stubs.Set(self.manager.securitygroup_client, "securitygroup_create", fake_securitygroup_create) + self.stubs.Set(self.manager.securitygrouprule_client, "securitygrouprule_create", fake_securitygrouprule_create) + self.mox.StubOutWithMock(db, "securitygroup_update") + values = {} + values["status"] = "ACTIVE" + values["neutron_securitygroup_id"] = fake_securitygroup_create(self.name) + db.securitygroup_update(self.context, GID, self.securitygroup_id, values) + self.mox.ReplayAll() + + self.manager.securitygroup_create(self.context, GID, self.securitygroup_id, 
self.name, self._securitygroups()) + + def test_securitygroup_create_no_securityrules(self): + self.stubs.Set(self.manager.securitygroup_client, "securitygroup_create", fake_securitygroup_create) + self.mox.StubOutWithMock(db, "securitygroup_update") + values = {} + values["status"] = "ACTIVE" + values["neutron_securitygroup_id"] = fake_securitygroup_create(self.name) + db.securitygroup_update(self.context, GID, self.securitygroup_id, values) + self.mox.ReplayAll() + + self.manager.securitygroup_create(self.context, GID, self.securitygroup_id, self.name, []) + + def test_securitygroup_create_raise_securitygroup_create_failed(self): + self.mox.StubOutWithMock(self.manager.securitygroup_client, "securitygroup_create") + self.manager.securitygroup_client.securitygroup_create(mox.IsA(str))\ + .AndRaise(exception.SecuritygroupCreateFailed()) + self.mox.StubOutWithMock(db, "securitygroup_update") + db.securitygroup_update(self.context, GID, self.securitygroup_id, {"status": "ERROR"}) + self.mox.ReplayAll() + + self.manager.securitygroup_create(self.context, GID, self.securitygroup_id, self.name, []) + + def test_securitygroup_create_raise_securitygrouprule_create_failed(self): + self.stubs.Set(self.manager.securitygroup_client, "securitygroup_create", fake_securitygroup_create) + self.mox.StubOutWithMock(self.manager.securitygrouprule_client, "securitygrouprule_create") + self.manager.securitygrouprule_client.securitygrouprule_create( + mox.IgnoreArg(), + mox.IgnoreArg(), + mox.IgnoreArg(), + mox.IgnoreArg(), + mox.IgnoreArg(), + mox.IgnoreArg())\ + .AndRaise(exception.SecuritygroupCreateFailed()) + self.mox.StubOutWithMock(db, "securitygroup_update") + values = {} + values["status"] = "ERROR" + values["neutron_securitygroup_id"] = fake_securitygroup_create(self.name) + db.securitygroup_update(self.context, GID, self.securitygroup_id, values) + self.mox.ReplayAll() + + self.manager.securitygroup_create(self.context, GID, self.securitygroup_id, self.name, self._securitygroups()) + + def test_securitygroup_create_securitygroup_not_found(self): + self.stubs.Set(self.manager.securitygroup_client, "securitygroup_create", fake_securitygroup_create) + self.mox.StubOutWithMock(db, "securitygroup_update") + values = {} + values["status"] = "ACTIVE" + values["neutron_securitygroup_id"] = fake_securitygroup_create(self.name) + db.securitygroup_update(self.context, GID, self.securitygroup_id, values)\ + .AndRaise(exception.SecuritygroupNotFound(securitygroup_id=self.securitygroup_id)) + values["status"] = "ERROR" + db.securitygroup_update(self.context, GID, self.securitygroup_id, values) + self.mox.ReplayAll() + + self.manager.securitygroup_create(self.context, GID, self.securitygroup_id, self.name, []) + + def test_securitygroup_delete(self): + self.mox.StubOutWithMock(self.manager.securitygroup_client, "securitygroup_delete") + self.manager.securitygroup_client.securitygroup_delete(mox.IsA(str)) + self.mox.ReplayAll() + + self.manager.securitygroup_delete(self.context, self.securitygroup_id) + + def test_securitygroup_delete_raise_securitygroup_delete_failed(self): + self.mox.StubOutWithMock(self.manager.securitygroup_client, "securitygroup_delete") + self.manager.securitygroup_client.securitygroup_delete(mox.IsA(str))\ + .AndRaise(exception.SecuritygroupDeleteFailed()) + self.mox.ReplayAll() + + self.manager.securitygroup_delete(self.context, self.securitygroup_id) + + +class ResourceOperatorManagerProcessesTestCase(test.NoDBTestCase): + def setUp(self): + super(ResourceOperatorManagerProcessesTestCase, 
self).setUp() + self.manager = operator_manager.ResourceOperatorManager() + self.context = context.RequestContext('fake_user', 'fake_project') + self.nova_instance_id = unicode(uuid.uuid4()) + + def test_processes_create(self): + self.mox.StubOutWithMock(self.manager.process_client, "process_create") + pid = "pida309f-9638-44de-827d-5125ff7e9865" + ppid = "ppid309f-9638-44de-827d-5125ff7e1968" + gid = "gida309f-9638-44de-827d-5125ff7e1246" + display_name = "display_name" + glance_image_id = "5aea309f-9638-44de-827d-5125ff7e4689" + nova_flavor_id = "3" + nova_keypair_id = "test" + neutron_securitygroup_ids = ["947dc616-e737-4cb9-b816-52ad80cb9e37", "1892987f-3874-46ef-a487-fb8e925210ce"] + neutron_network_ids = ["a3c6488a-a236-46f7-aab6-8f1fe91ad9ef","43015163-babe-4bee-8fe8-38470d28b2a2"] + metadata = {"metadata": "metadata"} + self.manager.process_client.process_create(display_name, + glance_image_id, + nova_flavor_id, + nova_keypair_id, + neutron_securitygroup_ids, + neutron_network_ids, + metadata)\ + .AndReturn(self.nova_instance_id) + + self.mox.StubOutWithMock(db, "process_update") + db.process_update(self.context, + gid, + pid, + {"nova_instance_id": self.nova_instance_id, + "status": "ACTIVE"}) + self.mox.ReplayAll() + + self.manager.process_create(self.context, + pid, + ppid, + gid, + display_name, + glance_image_id, + nova_flavor_id, + nova_keypair_id, + neutron_securitygroup_ids, + neutron_network_ids, + metadata) + + def test_processes_create_catch_exception(self): + self.mox.StubOutWithMock(self.manager.process_client, "process_create") + pid = "pida309f-9638-44de-827d-5125ff7e9865" + ppid = "ppid309f-9638-44de-827d-5125ff7e1968" + gid = "gida309f-9638-44de-827d-5125ff7e1246" + display_name = "display_name" + glance_image_id = "5aea309f-9638-44de-827d-5125ff7e4689" + nova_flavor_id = "3" + nova_keypair_id = "test" + neutron_securitygroup_ids = ["947dc616-e737-4cb9-b816-52ad80cb9e37", "1892987f-3874-46ef-a487-fb8e925210ce"] + neutron_network_ids = ["a3c6488a-a236-46f7-aab6-8f1fe91ad9ef","43015163-babe-4bee-8fe8-38470d28b2a2"] + metadata = {"metadata": "metadata"} + self.manager.process_client.process_create(display_name, + glance_image_id, + nova_flavor_id, + nova_keypair_id, + neutron_securitygroup_ids, + neutron_network_ids, + metadata)\ + .AndRaise(exception.ProcessCreateFailed()) + + self.mox.StubOutWithMock(db, "process_update") + db.process_update(self.context, + gid, + pid, + {"status": "ERROR"}) + self.mox.ReplayAll() + + self.manager.process_create(self.context, + pid, + ppid, + gid, + display_name, + glance_image_id, + nova_flavor_id, + nova_keypair_id, + neutron_securitygroup_ids, + neutron_network_ids, + metadata) + + def test_process_delete(self): + self.mox.StubOutWithMock(self.manager.process_client, "process_delete") + self.manager.process_client.process_delete(self.nova_instance_id) + self.mox.ReplayAll() + + self.manager.process_delete(self.context, self.nova_instance_id) + + def test_process_delete_catch_exception(self): + self.mox.StubOutWithMock(self.manager.process_client, "process_delete") + self.manager.process_client.process_delete(self.nova_instance_id)\ + .AndRaise(exception.ProcessDeleteFailed()) + self.mox.ReplayAll() + + self.manager.process_delete(self.context, self.nova_instance_id) \ No newline at end of file diff --git a/rack/tests/resourceoperator/test_rpcapi.py b/rack/tests/resourceoperator/test_rpcapi.py new file mode 100644 index 0000000..8b8c34c --- /dev/null +++ b/rack/tests/resourceoperator/test_rpcapi.py @@ -0,0 +1,97 @@ +# Copyright 
(c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Unit Tests for rack.resourceoperator.rpcapi +""" + +import mox +from oslo.config import cfg + +from rack import context +from rack.resourceoperator import rpcapi as operator_rpcapi +from rack import test + +CONF = cfg.CONF + +class ResourceOperatorRpcAPITestCase(test.NoDBTestCase): + def _test_operator_api(self, method, rpc_method, version=None, + fanout=None, host=None, **kwargs): + ctxt = context.RequestContext('fake_user', 'fake_project') + + rpcapi = operator_rpcapi.ResourceOperatorAPI() + self.assertIsNotNone(rpcapi.client) + self.assertEqual(rpcapi.client.target.topic, CONF.resourceoperator_topic) + + expected_retval = 'foo' if rpc_method == 'call' else None + expected_version = version + expected_fanout = fanout + expected_server = host + expected_kwargs = kwargs.copy() + if host: + kwargs['host'] = host + + self.mox.StubOutWithMock(rpcapi, 'client') + + rpcapi.client.can_send_version( + mox.IsA(str)).MultipleTimes().AndReturn(True) + + prepare_kwargs = {} + if expected_fanout: + prepare_kwargs['fanout'] = True + if expected_version: + prepare_kwargs['version'] = expected_version + if expected_server: + prepare_kwargs['server'] = expected_server + rpcapi.client.prepare(**prepare_kwargs).AndReturn(rpcapi.client) + + rpc_method = getattr(rpcapi.client, rpc_method) + + rpc_method(ctxt, method, **expected_kwargs).AndReturn('foo') + + self.mox.ReplayAll() + + rpcapi.client.can_send_version('I fool you mox') + + retval = getattr(rpcapi, method)(ctxt, **kwargs) + self.assertEqual(retval, expected_retval) + + + def test_keypair_create(self): + self._test_operator_api('keypair_create', rpc_method='cast', + host='fake_host', gid='fake_gid', keypair_id='fake_keypair_id', name='fake_name') + + def test_keypair_delete(self): + self._test_operator_api('keypair_delete', rpc_method='cast', + host='fake_host', nova_keypair_id='fake_nova_keypair_id') + + def test_securitygroup_create(self): + self._test_operator_api('securitygroup_create', rpc_method='cast', + host='fake_host', gid='fake_gid', securitygroup_id='fake_securitygroup_id', + name='fake_name', securitygrouprules='fake_rules') + + def test_securitygroup_delete(self): + self._test_operator_api('securitygroup_delete', rpc_method='cast', + host='fake_host', neutron_securitygroup_id='fake_neutron_securitygroup_id') + + def test_network_create(self): + network = {"network_id":"fake_id"} + self._test_operator_api('network_create', + rpc_method='cast', + host='fake_host', + network=network) + + def test_network_delete(self): + self._test_operator_api('network_delete', rpc_method='cast', + host='fake_host', neutron_network_id='fake_neutron_network_id', + ext_router='fake_ext_router') diff --git a/rack/tests/servicegroup/__init__.py b/rack/tests/servicegroup/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/rack/tests/servicegroup/test_db_servicegroup.py b/rack/tests/servicegroup/test_db_servicegroup.py new file mode 100644 index 
0000000..726876b --- /dev/null +++ b/rack/tests/servicegroup/test_db_servicegroup.py @@ -0,0 +1,144 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime + +import fixtures + +from rack import context +from rack import db +from rack.openstack.common import timeutils +from rack import service +from rack import servicegroup +from rack import test + + +class ServiceFixture(fixtures.Fixture): + + def __init__(self, host, binary, topic): + super(ServiceFixture, self).__init__() + self.host = host + self.binary = binary + self.topic = topic + self.serv = None + + def setUp(self): + super(ServiceFixture, self).setUp() + self.serv = service.Service(self.host, + self.binary, + self.topic, + 'rack.tests.test_service.FakeManager', + 1, 1) + self.addCleanup(self.serv.kill) + + +class DBServiceGroupTestCase(test.TestCase): + + def setUp(self): + super(DBServiceGroupTestCase, self).setUp() + servicegroup.API._driver = None + self.flags(servicegroup_driver='db') + self.down_time = 15 + self.flags(enable_new_services=True) + self.flags(service_down_time=self.down_time) + self.servicegroup_api = servicegroup.API() + self._host = 'foo' + self._binary = 'rack-fake' + self._topic = 'unittest' + self._ctx = context.get_admin_context() + + def test_DB_driver(self): + serv = self.useFixture( + ServiceFixture(self._host, self._binary, self._topic)).serv + serv.start() + service_ref = db.service_get_by_args(self._ctx, + self._host, + self._binary) + + self.assertTrue(self.servicegroup_api.service_is_up(service_ref)) + self.useFixture(test.TimeOverride()) + timeutils.advance_time_seconds(self.down_time + 1) + self.servicegroup_api._driver._report_state(serv) + service_ref = db.service_get_by_args(self._ctx, + self._host, + self._binary) + + self.assertTrue(self.servicegroup_api.service_is_up(service_ref)) + serv.stop() + timeutils.advance_time_seconds(self.down_time + 1) + service_ref = db.service_get_by_args(self._ctx, + self._host, + self._binary) + self.assertFalse(self.servicegroup_api.service_is_up(service_ref)) + + def test_get_all(self): + host1 = self._host + '_1' + host2 = self._host + '_2' + + serv1 = self.useFixture( + ServiceFixture(host1, self._binary, self._topic)).serv + serv1.start() + + serv2 = self.useFixture( + ServiceFixture(host2, self._binary, self._topic)).serv + serv2.start() + + service_ref1 = db.service_get_by_args(self._ctx, + host1, + self._binary) + service_ref2 = db.service_get_by_args(self._ctx, + host2, + self._binary) + + services = self.servicegroup_api.get_all(self._topic) + + self.assertIn(service_ref1['host'], services) + self.assertIn(service_ref2['host'], services) + + service_id = self.servicegroup_api.get_one(self._topic) + self.assertIn(service_id, services) + + def test_service_is_up(self): + fts_func = datetime.datetime.fromtimestamp + fake_now = 1000 + down_time = 15 + self.flags(service_down_time=down_time) + self.mox.StubOutWithMock(timeutils, 'utcnow') + self.servicegroup_api = servicegroup.API() + 
+ # Up (equal) + timeutils.utcnow().AndReturn(fts_func(fake_now)) + service = {'updated_at': fts_func(fake_now - self.down_time), + 'created_at': fts_func(fake_now - self.down_time)} + self.mox.ReplayAll() + result = self.servicegroup_api.service_is_up(service) + self.assertTrue(result) + + self.mox.ResetAll() + # Up + timeutils.utcnow().AndReturn(fts_func(fake_now)) + service = {'updated_at': fts_func(fake_now - self.down_time + 1), + 'created_at': fts_func(fake_now - self.down_time + 1)} + self.mox.ReplayAll() + result = self.servicegroup_api.service_is_up(service) + self.assertTrue(result) + + self.mox.ResetAll() + # Down + timeutils.utcnow().AndReturn(fts_func(fake_now)) + service = {'updated_at': fts_func(fake_now - self.down_time - 3), + 'created_at': fts_func(fake_now - self.down_time - 3)} + self.mox.ReplayAll() + result = self.servicegroup_api.service_is_up(service) + self.assertFalse(result) diff --git a/rack/tests/servicegroup/test_mc_servicegroup.py b/rack/tests/servicegroup/test_mc_servicegroup.py new file mode 100644 index 0000000..35bbbc7 --- /dev/null +++ b/rack/tests/servicegroup/test_mc_servicegroup.py @@ -0,0 +1,209 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import fixtures + +from rack import context +from rack import db +from rack.openstack.common import timeutils +from rack import service +from rack import servicegroup +from rack import test + + +class ServiceFixture(fixtures.Fixture): + + def __init__(self, host, binary, topic): + super(ServiceFixture, self).__init__() + self.host = host + self.binary = binary + self.topic = topic + self.serv = None + + def setUp(self): + super(ServiceFixture, self).setUp() + self.serv = service.Service(self.host, + self.binary, + self.topic, + 'rack.tests.test_service.FakeManager', + 1, 1) + self.addCleanup(self.serv.kill) + + +class MemcachedServiceGroupTestCase(test.TestCase): + + def setUp(self): + super(MemcachedServiceGroupTestCase, self).setUp() + servicegroup.API._driver = None + self.flags(servicegroup_driver='mc') + self.down_time = 15 + self.flags(enable_new_services=True) + self.flags(service_down_time=self.down_time) + self.servicegroup_api = servicegroup.API(test=True) + self._host = 'foo' + self._binary = 'rack-fake' + self._topic = 'unittest' + self._ctx = context.get_admin_context() + + def test_memcached_driver(self): + serv = self.useFixture( + ServiceFixture(self._host, self._binary, self._topic)).serv + serv.start() + service_ref = db.service_get_by_args(self._ctx, + self._host, + self._binary) + hostkey = str("%s:%s" % (self._topic, self._host)) + self.servicegroup_api._driver.mc.set(hostkey, + timeutils.utcnow(), + time=self.down_time) + + self.assertTrue(self.servicegroup_api.service_is_up(service_ref)) + self.useFixture(test.TimeOverride()) + timeutils.advance_time_seconds(self.down_time + 1) + self.servicegroup_api._driver._report_state(serv) + service_ref = db.service_get_by_args(self._ctx, + self._host, + self._binary) + + 
self.assertTrue(self.servicegroup_api.service_is_up(service_ref)) + serv.stop() + timeutils.advance_time_seconds(self.down_time + 1) + service_ref = db.service_get_by_args(self._ctx, + self._host, + self._binary) + self.assertFalse(self.servicegroup_api.service_is_up(service_ref)) + + def test_get_all(self): + host1 = self._host + '_1' + host2 = self._host + '_2' + host3 = self._host + '_3' + + serv1 = self.useFixture( + ServiceFixture(host1, self._binary, self._topic)).serv + serv1.start() + + serv2 = self.useFixture( + ServiceFixture(host2, self._binary, self._topic)).serv + serv2.start() + + serv3 = self.useFixture( + ServiceFixture(host3, self._binary, self._topic)).serv + serv3.start() + + db.service_get_by_args(self._ctx, host1, self._binary) + db.service_get_by_args(self._ctx, host2, self._binary) + db.service_get_by_args(self._ctx, host3, self._binary) + + host1key = str("%s:%s" % (self._topic, host1)) + host2key = str("%s:%s" % (self._topic, host2)) + host3key = str("%s:%s" % (self._topic, host3)) + self.servicegroup_api._driver.mc.set(host1key, + timeutils.utcnow(), + time=self.down_time) + self.servicegroup_api._driver.mc.set(host2key, + timeutils.utcnow(), + time=self.down_time) + self.servicegroup_api._driver.mc.set(host3key, + timeutils.utcnow(), + time=-1) + + services = self.servicegroup_api.get_all(self._topic) + + self.assertIn(host1, services) + self.assertIn(host2, services) + self.assertNotIn(host3, services) + + service_id = self.servicegroup_api.get_one(self._topic) + self.assertIn(service_id, services) + + def test_service_is_up(self): + serv = self.useFixture( + ServiceFixture(self._host, self._binary, self._topic)).serv + serv.start() + service_ref = db.service_get_by_args(self._ctx, + self._host, + self._binary) + fake_now = 1000 + down_time = 15 + self.flags(service_down_time=down_time) + self.mox.StubOutWithMock(timeutils, 'utcnow_ts') + self.servicegroup_api = servicegroup.API() + hostkey = str("%s:%s" % (self._topic, self._host)) + + # Up (equal) + timeutils.utcnow_ts().AndReturn(fake_now) + timeutils.utcnow_ts().AndReturn(fake_now + down_time - 1) + self.mox.ReplayAll() + self.servicegroup_api._driver.mc.set(hostkey, + timeutils.utcnow(), + time=down_time) + result = self.servicegroup_api.service_is_up(service_ref) + self.assertTrue(result) + + self.mox.ResetAll() + # Up + timeutils.utcnow_ts().AndReturn(fake_now) + timeutils.utcnow_ts().AndReturn(fake_now + down_time - 2) + self.mox.ReplayAll() + self.servicegroup_api._driver.mc.set(hostkey, + timeutils.utcnow(), + time=down_time) + result = self.servicegroup_api.service_is_up(service_ref) + self.assertTrue(result) + + self.mox.ResetAll() + # Down + timeutils.utcnow_ts().AndReturn(fake_now) + timeutils.utcnow_ts().AndReturn(fake_now + down_time) + self.mox.ReplayAll() + self.servicegroup_api._driver.mc.set(hostkey, + timeutils.utcnow(), + time=down_time) + result = self.servicegroup_api.service_is_up(service_ref) + self.assertFalse(result) + + self.mox.ResetAll() + # Down + timeutils.utcnow_ts().AndReturn(fake_now) + timeutils.utcnow_ts().AndReturn(fake_now + down_time + 1) + self.mox.ReplayAll() + self.servicegroup_api._driver.mc.set(hostkey, + timeutils.utcnow(), + time=down_time) + result = self.servicegroup_api.service_is_up(service_ref) + self.assertFalse(result) + + self.mox.ResetAll() + + def test_report_state(self): + serv = self.useFixture( + ServiceFixture(self._host, self._binary, self._topic)).serv + serv.start() + db.service_get_by_args(self._ctx, self._host, self._binary) + 
self.servicegroup_api = servicegroup.API() + + # updating model_disconnected + serv.model_disconnected = True + self.servicegroup_api._driver._report_state(serv) + self.assertFalse(serv.model_disconnected) + + # handling exception + serv.model_disconnected = True + self.servicegroup_api._driver.mc = None + self.servicegroup_api._driver._report_state(serv) + self.assertTrue(serv.model_disconnected) + + delattr(serv, 'model_disconnected') + self.servicegroup_api._driver.mc = None + self.servicegroup_api._driver._report_state(serv) + self.assertTrue(serv.model_disconnected) diff --git a/rack/tests/servicegroup/test_zk_driver.py b/rack/tests/servicegroup/test_zk_driver.py new file mode 100644 index 0000000..620326e --- /dev/null +++ b/rack/tests/servicegroup/test_zk_driver.py @@ -0,0 +1,63 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Test the ZooKeeper driver for servicegroup. + +You need to install ZooKeeper locally and related dependencies +to run the test. It's unclear how to install python-zookeeper lib +in venv so you might have to run the test without it. + +To set up in Ubuntu 12.04: +$ sudo apt-get install zookeeper zookeeperd python-zookeeper +$ sudo pip install evzookeeper +$ nosetests rack.tests.servicegroup.test_zk_driver +""" + +import eventlet + +from rack import servicegroup +from rack import test + + +class ZKServiceGroupTestCase(test.NoDBTestCase): + + def setUp(self): + super(ZKServiceGroupTestCase, self).setUp() + servicegroup.API._driver = None + from rack.servicegroup.drivers import zk + self.flags(servicegroup_driver='zk') + self.flags(address='localhost:2181', group="zookeeper") + try: + zk.ZooKeeperDriver() + except ImportError: + self.skipTest("Unable to test due to lack of ZooKeeper") + + def test_join_leave(self): + self.servicegroup_api = servicegroup.API() + service_id = {'topic': 'unittest', 'host': 'serviceA'} + self.servicegroup_api.join(service_id['host'], service_id['topic']) + self.assertTrue(self.servicegroup_api.service_is_up(service_id)) + self.servicegroup_api.leave(service_id['host'], service_id['topic']) + # make sure zookeeper is updated and watcher is triggered + eventlet.sleep(1) + self.assertFalse(self.servicegroup_api.service_is_up(service_id)) + + def test_stop(self): + self.servicegroup_api = servicegroup.API() + service_id = {'topic': 'unittest', 'host': 'serviceA'} + pulse = self.servicegroup_api.join(service_id['host'], + service_id['topic'], None) + self.assertTrue(self.servicegroup_api.service_is_up(service_id)) + pulse.stop() + eventlet.sleep(1) + self.assertFalse(self.servicegroup_api.service_is_up(service_id)) diff --git a/rack/tests/test_service.py b/rack/tests/test_service.py new file mode 100644 index 0000000..4f49bf7 --- /dev/null +++ b/rack/tests/test_service.py @@ -0,0 +1,329 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. 
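Both the memcached and ZooKeeper servicegroup tests above exercise the same liveness contract: joining (or periodically reporting state) refreshes a per-host record with a bounded lifetime, and service_is_up() simply checks whether that record is still fresh. The sketch below illustrates the memcached-style variant under that assumption; the class and method names are hypothetical and the cache is a simplified stand-in (python-memcached itself takes the expiry via a time= argument), so this is not the actual rack.servicegroup.drivers.mc code.

import time


class FakeCache(object):
    """Tiny stand-in for a memcached client: entries expire after ttl seconds."""

    def __init__(self):
        self._data = {}

    def set(self, key, value, ttl=0):
        expires_at = time.time() + ttl if ttl > 0 else None
        self._data[key] = (value, expires_at)

    def get(self, key):
        value, expires_at = self._data.get(key, (None, 0))
        if expires_at is not None and expires_at <= time.time():
            return None
        return value


class McLikeDriver(object):
    """Liveness driver sketch: a fresh per-host key means the service is up."""

    def __init__(self, cache, down_time):
        self.mc = cache
        self.down_time = down_time

    def report_state(self, topic, host):
        # Refresh the per-host key; if the service stops reporting,
        # the key ages out and the service is considered down.
        self.mc.set('%s:%s' % (topic, host), time.time(), ttl=self.down_time)

    def is_up(self, topic, host):
        return self.mc.get('%s:%s' % (topic, host)) is not None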
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Unit Tests for remote procedure calls using queue +""" + +import sys +import testtools + +import mock +import mox +from oslo.config import cfg + +from rack import context +from rack import db +from rack import exception +from rack import manager +from rack import rpc +from rack import service +from rack import test +from rack.tests import utils +from rack import wsgi + +from rack.openstack.common import service as _service + +test_service_opts = [ + cfg.StrOpt("fake_manager", + default="rack.tests.test_service.FakeManager", + help="Manager for testing"), + cfg.StrOpt("test_service_listen", + default='127.0.0.1', + help="Host to bind test service to"), + cfg.IntOpt("test_service_listen_port", + default=0, + help="Port number to bind test service to"), + ] + +CONF = cfg.CONF +CONF.register_opts(test_service_opts) + + +class FakeManager(manager.Manager): + """Fake manager for tests.""" + def test_method(self): + return 'manager' + + +class ExtendedService(service.Service): + def test_method(self): + return 'service' + + +class ServiceManagerTestCase(test.TestCase): + """Test cases for Services.""" + + def test_message_gets_to_manager(self): + serv = service.Service('test', + 'test', + 'test', + 'rack.tests.test_service.FakeManager') + serv.start() + self.assertEqual(serv.test_method(), 'manager') + + def test_override_manager_method(self): + serv = ExtendedService('test', + 'test', + 'test', + 'rack.tests.test_service.FakeManager') + serv.start() + self.assertEqual(serv.test_method(), 'service') + + def test_service_with_min_down_time(self): + CONF.set_override('service_down_time', 10) + CONF.set_override('report_interval', 10) + serv = service.Service('test', + 'test', + 'test', + 'rack.tests.test_service.FakeManager') + serv.start() + self.assertEqual(CONF.service_down_time, 25) + + +class ServiceFlagsTestCase(test.TestCase): + def test_service_enabled_on_create_based_on_flag(self): + self.flags(enable_new_services=True) + host = 'foo' + binary = 'rack-fake' + app = service.Service.create(host=host, binary=binary) + app.start() + app.stop() + ref = db.service_get(context.get_admin_context(), app.service_id) + db.service_destroy(context.get_admin_context(), app.service_id) + self.assertTrue(not ref['disabled']) + + def test_service_disabled_on_create_based_on_flag(self): + self.flags(enable_new_services=False) + host = 'foo' + binary = 'rack-fake' + app = service.Service.create(host=host, binary=binary) + app.start() + app.stop() + ref = db.service_get(context.get_admin_context(), app.service_id) + db.service_destroy(context.get_admin_context(), app.service_id) + self.assertTrue(ref['disabled']) + + +class ServiceTestCase(test.TestCase): + """Test cases for Services.""" + + def setUp(self): + super(ServiceTestCase, self).setUp() + self.host = 'foo' + self.binary = 'rack-fake' + self.topic = 'fake' + self.mox.StubOutWithMock(db, 'service_create') + self.mox.StubOutWithMock(db, 'service_get_by_args') + + def test_create(self): + app = 
service.Service.create(host=self.host, binary=self.binary, + topic=self.topic) + + self.assertTrue(app) + + def _service_start_mocks(self): + service_create = {'host': self.host, + 'binary': self.binary, + 'topic': self.topic, + 'report_count': 0} + service_ref = {'host': self.host, + 'binary': self.binary, + 'topic': self.topic, + 'report_count': 0, + 'id': 1} + + db.service_get_by_args(mox.IgnoreArg(), + self.host, self.binary).AndRaise(exception.NotFound()) + db.service_create(mox.IgnoreArg(), + service_create).AndReturn(service_ref) + return service_ref + + def test_init_and_start_hooks(self): + self.manager_mock = self.mox.CreateMock(FakeManager) + self.mox.StubOutWithMock(sys.modules[__name__], + 'FakeManager', use_mock_anything=True) + self.mox.StubOutWithMock(self.manager_mock, 'init_host') + self.mox.StubOutWithMock(self.manager_mock, 'pre_start_hook') + self.mox.StubOutWithMock(self.manager_mock, 'post_start_hook') + + FakeManager(host=self.host).AndReturn(self.manager_mock) + + self.manager_mock.service_name = self.topic + self.manager_mock.additional_endpoints = [] + + # init_host is called before any service record is created + self.manager_mock.init_host() + self._service_start_mocks() + # pre_start_hook is called after service record is created, + # but before RPC consumer is created + self.manager_mock.pre_start_hook() + # post_start_hook is called after RPC consumer is created. + self.manager_mock.post_start_hook() + + self.mox.ReplayAll() + + serv = service.Service(self.host, + self.binary, + self.topic, + 'rack.tests.test_service.FakeManager') + serv.start() + + def test_service_check_create_race(self): + self.manager_mock = self.mox.CreateMock(FakeManager) + self.mox.StubOutWithMock(sys.modules[__name__], 'FakeManager', + use_mock_anything=True) + self.mox.StubOutWithMock(self.manager_mock, 'init_host') + self.mox.StubOutWithMock(self.manager_mock, 'pre_start_hook') + self.mox.StubOutWithMock(self.manager_mock, 'post_start_hook') + + FakeManager(host=self.host).AndReturn(self.manager_mock) + + # init_host is called before any service record is created + self.manager_mock.init_host() + + db.service_get_by_args(mox.IgnoreArg(), self.host, self.binary + ).AndRaise(exception.NotFound) + ex = exception.ServiceTopicExists(host='foo', topic='bar') + db.service_create(mox.IgnoreArg(), mox.IgnoreArg() + ).AndRaise(ex) + + class TestException(Exception): + pass + + db.service_get_by_args(mox.IgnoreArg(), self.host, self.binary + ).AndRaise(TestException) + + self.mox.ReplayAll() + + serv = service.Service(self.host, + self.binary, + self.topic, + 'rack.tests.test_service.FakeManager') + self.assertRaises(TestException, serv.start) + + def test_parent_graceful_shutdown(self): + self.manager_mock = self.mox.CreateMock(FakeManager) + self.mox.StubOutWithMock(sys.modules[__name__], + 'FakeManager', use_mock_anything=True) + self.mox.StubOutWithMock(self.manager_mock, 'init_host') + self.mox.StubOutWithMock(self.manager_mock, 'pre_start_hook') + self.mox.StubOutWithMock(self.manager_mock, 'post_start_hook') + + self.mox.StubOutWithMock(_service.Service, 'stop') + + FakeManager(host=self.host).AndReturn(self.manager_mock) + + self.manager_mock.service_name = self.topic + self.manager_mock.additional_endpoints = [] + + # init_host is called before any service record is created + self.manager_mock.init_host() + self._service_start_mocks() + # pre_start_hook is called after service record is created, + # but before RPC consumer is created + self.manager_mock.pre_start_hook() + # 
post_start_hook is called after RPC consumer is created. + self.manager_mock.post_start_hook() + + _service.Service.stop() + + self.mox.ReplayAll() + + serv = service.Service(self.host, + self.binary, + self.topic, + 'rack.tests.test_service.FakeManager') + serv.start() + + serv.stop() + + @mock.patch('rack.servicegroup.API') + @mock.patch('rack.db.api.service_get_by_args') + def test_parent_graceful_shutdown_with_cleanup_host(self, + mock_svc_get_by_args, + mock_API): + self.mox.UnsetStubs() + mock_svc_get_by_args.return_value = {'id': 'some_value'} + mock_manager = mock.Mock() + + serv = service.Service(self.host, + self.binary, + self.topic, + 'rack.tests.test_service.FakeManager') + + serv.manager = mock_manager + serv.manager.additional_endpoints = [] + + serv.start() + serv.manager.init_host.assert_called_with() + + serv.stop() + serv.manager.cleanup_host.assert_called_with() + + @mock.patch('rack.servicegroup.API') + @mock.patch('rack.db.api.service_get_by_args') + @mock.patch.object(rpc, 'get_server') + def test_service_stop_waits_for_rpcserver( + self, mock_rpc, mock_svc_get_by_args, mock_API): + self.mox.UnsetStubs() + mock_svc_get_by_args.return_value = {'id': 'some_value'} + serv = service.Service(self.host, + self.binary, + self.topic, + 'rack.tests.test_service.FakeManager') + serv.start() + serv.stop() + serv.rpcserver.start.assert_called_once_with() + serv.rpcserver.stop.assert_called_once_with() + serv.rpcserver.wait.assert_called_once_with() + + +class TestWSGIService(test.TestCase): + + def setUp(self): + super(TestWSGIService, self).setUp() + self.stubs.Set(wsgi.Loader, "load_app", mox.MockAnything()) + + def test_service_random_port(self): + test_service = service.WSGIService("test_service") + test_service.start() + self.assertNotEqual(0, test_service.port) + test_service.stop() + + def test_service_start_with_illegal_workers(self): + CONF.set_override("rackapi_workers", -1) + self.assertRaises(exception.InvalidInput, + service.WSGIService, "rackapi") + + @testtools.skipIf(not utils.is_ipv6_supported(), "no ipv6 support") + def test_service_random_port_with_ipv6(self): + CONF.set_default("test_service_listen", "::1") + test_service = service.WSGIService("test_service") + test_service.start() + self.assertEqual("::1", test_service.host) + self.assertNotEqual(0, test_service.port) + test_service.stop() + + +class TestLauncher(test.TestCase): + + def setUp(self): + super(TestLauncher, self).setUp() + self.stubs.Set(wsgi.Loader, "load_app", mox.MockAnything()) + self.service = service.WSGIService("test_service") + + def test_launch_app(self): + service.serve(self.service) + self.assertNotEqual(0, self.service.port) + service._launcher.stop() diff --git a/rack/tests/utils.py b/rack/tests/utils.py new file mode 100644 index 0000000..bb4f5d7 --- /dev/null +++ b/rack/tests/utils.py @@ -0,0 +1,86 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
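The hook-ordering tests above (test_init_and_start_hooks, the graceful-shutdown tests and test_service_stop_waits_for_rpcserver) pin down the lifecycle that Service.start() and stop() are expected to follow. The sketch below is inferred only from the expectations those tests record; it is not the actual rack/service.py implementation, and the collaborator names (db_api, rpc_api, service_create_or_get) are placeholders.

class ServiceLifecycleSketch(object):
    """start()/stop() ordering implied by the hook-ordering tests."""

    def __init__(self, manager, db_api, rpc_api):
        self.manager = manager      # plays the role of FakeManager
        self.db_api = db_api        # stands in for rack.db
        self.rpc_api = rpc_api      # stands in for rack.rpc
        self.service_ref = None
        self.rpcserver = None

    def start(self):
        # 1. init_host runs before any service record is created.
        self.manager.init_host()
        # 2. The service record is looked up or created in the database.
        self.service_ref = self.db_api.service_create_or_get()
        # 3. pre_start_hook runs after the record exists, but before the
        #    RPC consumer is created.
        self.manager.pre_start_hook()
        self.rpcserver = self.rpc_api.get_server()
        self.rpcserver.start()
        # 4. post_start_hook runs once the RPC consumer is up.
        self.manager.post_start_hook()

    def stop(self):
        # stop() shuts the RPC server down and waits for it, then gives the
        # manager a chance to clean up the host.
        self.rpcserver.stop()
        self.rpcserver.wait()
        if hasattr(self.manager, 'cleanup_host'):
            self.manager.cleanup_host()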
+ +import errno +import platform +import socket +import sys + +from oslo.config import cfg + +import rack.context +import rack.db +from rack import exception + +CONF = cfg.CONF +CONF.import_opt('use_ipv6', 'rack.netconf') + + +def get_test_admin_context(): + return rack.context.get_admin_context() + + + +def is_osx(): + return platform.mac_ver()[0] != '' + + +test_dns_managers = [] + + +def cleanup_dns_managers(): + global test_dns_managers + for manager in test_dns_managers: + manager.delete_dns_file() + test_dns_managers = [] + + +def killer_xml_body(): + return (("""<?xml version="1.0" ?> + <!DOCTYPE x [<!ENTITY a "%(a)s"> <!ENTITY b "%(b)s"> <!ENTITY c "%(c)s"> + ]> + <foo> + <bar> + <v1>%(d)s</v1> + </bar> + </foo>""") % { + 'a': 'A' * 10, + 'b': '&a;' * 10, + 'c': '&b;' * 10, + 'd': '&c;' * 9999, + }).strip() + + +def is_ipv6_supported(): + has_ipv6_support = socket.has_ipv6 + try: + s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) + s.close() + except socket.error as e: + if e.errno == errno.EAFNOSUPPORT: + has_ipv6_support = False + else: + raise + + # check if there is at least one interface with ipv6 + if has_ipv6_support and sys.platform.startswith('linux'): + try: + with open('/proc/net/if_inet6') as f: + if not f.read(): + has_ipv6_support = False + except IOError: + has_ipv6_support = False + + return has_ipv6_support diff --git a/rack/utils.py b/rack/utils.py new file mode 100644 index 0000000..410df53 --- /dev/null +++ b/rack/utils.py @@ -0,0 +1,1165 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Utilities and helper functions.""" + +import contextlib +import datetime +import functools +import hashlib +import inspect +import multiprocessing +import os +import pyclbr +import random +import re +import shutil +import socket +import struct +import sys +import tempfile +from xml.sax import saxutils + +import eventlet +import netaddr +from oslo.config import cfg +from oslo import messaging +import six + +from rack import exception +from rack.openstack.common import excutils +from rack.openstack.common import gettextutils +from rack.openstack.common.gettextutils import _ +from rack.openstack.common import importutils +from rack.openstack.common import lockutils +from rack.openstack.common import log as logging +from rack.openstack.common import processutils +from rack.openstack.common import timeutils + +notify_decorator = 'rack.notifications.notify_decorator' + +monkey_patch_opts = [ + cfg.BoolOpt('monkey_patch', + default=False, + help='Whether to log monkey patching'), + cfg.ListOpt('monkey_patch_modules', + default=[ + 'rack.api.ec2.cloud:%s' % (notify_decorator), + 'rack.compute.api:%s' % (notify_decorator) + ], + help='List of modules/decorators to monkey patch'), +] +utils_opts = [ + cfg.IntOpt('password_length', + default=12, + help='Length of generated instance admin passwords'), + cfg.StrOpt('instance_usage_audit_period', + default='month', + help='Time period to generate instance usages for. 
' + 'Time period must be hour, day, month or year'), + cfg.StrOpt('rootwrap_config', + default="/etc/rack/rootwrap.conf", + help='Path to the rootwrap configuration file to use for ' + 'running commands as root'), + cfg.StrOpt('tempdir', + help='Explicitly specify the temporary working directory'), +] +CONF = cfg.CONF +CONF.register_opts(monkey_patch_opts) +CONF.register_opts(utils_opts) + +LOG = logging.getLogger(__name__) + +TIME_UNITS = { + 'SECOND': 1, + 'MINUTE': 60, + 'HOUR': 3600, + 'DAY': 84400 +} + + +_IS_NEUTRON = None + +synchronized = lockutils.synchronized_with_prefix('rack-') + +SM_IMAGE_PROP_PREFIX = "image_" +SM_INHERITABLE_KEYS = ( + 'min_ram', 'min_disk', 'disk_format', 'container_format', +) + + +def vpn_ping(address, port, timeout=0.05, session_id=None): + """Sends a vpn negotiation packet and returns the server session. + + Returns False on a failure. Basic packet structure is below. + + Client packet (14 bytes):: + + 0 1 8 9 13 + +-+--------+-----+ + |x| cli_id |?????| + +-+--------+-----+ + x = packet identifier 0x38 + cli_id = 64 bit identifier + ? = unknown, probably flags/padding + + Server packet (26 bytes):: + + 0 1 8 9 13 14 21 2225 + +-+--------+-----+--------+----+ + |x| srv_id |?????| cli_id |????| + +-+--------+-----+--------+----+ + x = packet identifier 0x40 + cli_id = 64 bit identifier + ? = unknown, probably flags/padding + bit 9 was 1 and the rest were 0 in testing + + """ + if session_id is None: + session_id = random.randint(0, 0xffffffffffffffff) + sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + data = struct.pack('!BQxxxxx', 0x38, session_id) + sock.sendto(data, (address, port)) + sock.settimeout(timeout) + try: + received = sock.recv(2048) + except socket.timeout: + return False + finally: + sock.close() + fmt = '!BQxxxxxQxxxx' + if len(received) != struct.calcsize(fmt): + LOG.warn(_('Expected to receive %(exp)s bytes, but actually %(act)s') % + dict(exp=struct.calcsize(fmt), act=len(received))) + return False + (identifier, server_sess, client_sess) = struct.unpack(fmt, received) + if identifier == 0x40 and client_sess == session_id: + return server_sess + + +def _get_root_helper(): + return 'sudo rack-rootwrap %s' % CONF.rootwrap_config + + +def execute(*cmd, **kwargs): + """Convenience wrapper around oslo's execute() method.""" + if 'run_as_root' in kwargs and not 'root_helper' in kwargs: + kwargs['root_helper'] = _get_root_helper() + return processutils.execute(*cmd, **kwargs) + + +def trycmd(*args, **kwargs): + """Convenience wrapper around oslo's trycmd() method.""" + if 'run_as_root' in kwargs and not 'root_helper' in kwargs: + kwargs['root_helper'] = _get_root_helper() + return processutils.trycmd(*args, **kwargs) + + +def rackdir(): + import rack + return os.path.abspath(rack.__file__).split('rack/__init__.py')[0] + + +def generate_uid(topic, size=8): + characters = '01234567890abcdefghijklmnopqrstuvwxyz' + choices = [random.choice(characters) for _x in xrange(size)] + return '%s-%s' % (topic, ''.join(choices)) + + +DEFAULT_PASSWORD_SYMBOLS = ('23456789', # Removed: 0,1 + 'ABCDEFGHJKLMNPQRSTUVWXYZ', # Removed: I, O + 'abcdefghijkmnopqrstuvwxyz') # Removed: l + + +EASIER_PASSWORD_SYMBOLS = ('23456789', # Removed: 0, 1 + 'ABCDEFGHJKLMNPQRSTUVWXYZ') # Removed: I, O + + +def last_completed_audit_period(unit=None, before=None): + """This method gives you the most recently *completed* audit period. 
+ + arguments: + units: string, one of 'hour', 'day', 'month', 'year' + Periods normally begin at the beginning (UTC) of the + period unit (So a 'day' period begins at midnight UTC, + a 'month' unit on the 1st, a 'year' on Jan, 1) + unit string may be appended with an optional offset + like so: 'day@18' This will begin the period at 18:00 + UTC. 'month@15' starts a monthly period on the 15th, + and year@3 begins a yearly one on March 1st. + before: Give the audit period most recently completed before + . Defaults to now. + + + returns: 2 tuple of datetimes (begin, end) + The begin timestamp of this audit period is the same as the + end of the previous. + """ + if not unit: + unit = CONF.instance_usage_audit_period + + offset = 0 + if '@' in unit: + unit, offset = unit.split("@", 1) + offset = int(offset) + + if before is not None: + rightnow = before + else: + rightnow = timeutils.utcnow() + if unit not in ('month', 'day', 'year', 'hour'): + raise ValueError('Time period must be hour, day, month or year') + if unit == 'month': + if offset == 0: + offset = 1 + end = datetime.datetime(day=offset, + month=rightnow.month, + year=rightnow.year) + if end >= rightnow: + year = rightnow.year + if 1 >= rightnow.month: + year -= 1 + month = 12 + (rightnow.month - 1) + else: + month = rightnow.month - 1 + end = datetime.datetime(day=offset, + month=month, + year=year) + year = end.year + if 1 >= end.month: + year -= 1 + month = 12 + (end.month - 1) + else: + month = end.month - 1 + begin = datetime.datetime(day=offset, month=month, year=year) + + elif unit == 'year': + if offset == 0: + offset = 1 + end = datetime.datetime(day=1, month=offset, year=rightnow.year) + if end >= rightnow: + end = datetime.datetime(day=1, + month=offset, + year=rightnow.year - 1) + begin = datetime.datetime(day=1, + month=offset, + year=rightnow.year - 2) + else: + begin = datetime.datetime(day=1, + month=offset, + year=rightnow.year - 1) + + elif unit == 'day': + end = datetime.datetime(hour=offset, + day=rightnow.day, + month=rightnow.month, + year=rightnow.year) + if end >= rightnow: + end = end - datetime.timedelta(days=1) + begin = end - datetime.timedelta(days=1) + + elif unit == 'hour': + end = rightnow.replace(minute=offset, second=0, microsecond=0) + if end >= rightnow: + end = end - datetime.timedelta(hours=1) + begin = end - datetime.timedelta(hours=1) + + return (begin, end) + + +def generate_password(length=None, symbolgroups=DEFAULT_PASSWORD_SYMBOLS): + """Generate a random password from the supplied symbol groups. + + At least one symbol from each group will be included. Unpredictable + results if length is less than the number of symbol groups. + + Believed to be reasonably secure (with a reasonable password length!) + + """ + if length is None: + length = CONF.password_length + + r = random.SystemRandom() + + # NOTE(jerdfelt): Some password policies require at least one character + # from each group of symbols, so start off with one random character + # from each symbol group + password = [r.choice(s) for s in symbolgroups] + # If length < len(symbolgroups), the leading characters will only + # be from the first length groups. Try our best to not be predictable + # by shuffling and then truncating. 
+ r.shuffle(password) + password = password[:length] + length -= len(password) + + # then fill with random characters from all symbol groups + symbols = ''.join(symbolgroups) + password.extend([r.choice(symbols) for _i in xrange(length)]) + + # finally shuffle to ensure first x characters aren't from a + # predictable group + r.shuffle(password) + + return ''.join(password) + + +def get_my_ipv4_address(): + """Run ip route/addr commands to figure out the best ipv4 + """ + LOCALHOST = '127.0.0.1' + try: + out = execute('ip', '-f', 'inet', '-o', 'route', 'show') + + # Find the default route + regex_default = ('default\s*via\s*' + '(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' + '\s*dev\s*(\w*)\s*') + default_routes = re.findall(regex_default, out[0]) + if not default_routes: + return LOCALHOST + gateway, iface = default_routes[0] + + # Find the right subnet for the gateway/interface for + # the default route + route = ('(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\/(\d{1,2})' + '\s*dev\s*(\w*)\s*') + for match in re.finditer(route, out[0]): + subnet = netaddr.IPNetwork(match.group(1) + "/" + match.group(2)) + if (match.group(3) == iface and + netaddr.IPAddress(gateway) in subnet): + try: + return _get_ipv4_address_for_interface(iface) + except exception.RackException: + pass + except Exception as ex: + LOG.error(_("Couldn't get IPv4 : %(ex)s") % {'ex': ex}) + return LOCALHOST + + +def _get_ipv4_address_for_interface(iface): + """Run ip addr show for an interface and grab its ipv4 addresses + """ + try: + out = execute('ip', '-f', 'inet', '-o', 'addr', 'show', iface) + regexp_address = re.compile('inet\s*' + '(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})') + address = [m.group(1) for m in regexp_address.finditer(out[0]) + if m.group(1) != '127.0.0.1'] + if address: + return address[0] + else: + msg = _('IPv4 address is not found.: %s') % out[0] + raise exception.RackException(msg) + except Exception as ex: + msg = _("Couldn't get IPv4 of %(interface)s" + " : %(ex)s") % {'interface': iface, 'ex': ex} + LOG.error(msg) + raise exception.RackException(msg) + + +def get_my_linklocal(interface): + try: + if_str = execute('ip', '-f', 'inet6', '-o', 'addr', 'show', interface) + condition = '\s+inet6\s+([0-9a-f:]+)/\d+\s+scope\s+link' + links = [re.search(condition, x) for x in if_str[0].split('\n')] + address = [w.group(1) for w in links if w is not None] + if address[0] is not None: + return address[0] + else: + msg = _('Link Local address is not found.:%s') % if_str + raise exception.RackException(msg) + except Exception as ex: + msg = _("Couldn't get Link Local IP of %(interface)s" + " :%(ex)s") % {'interface': interface, 'ex': ex} + raise exception.RackException(msg) + + +class LazyPluggable(object): + """A pluggable backend loaded lazily based on some value.""" + + def __init__(self, pivot, config_group=None, **backends): + self.__backends = backends + self.__pivot = pivot + self.__backend = None + self.__config_group = config_group + + def __get_backend(self): + if not self.__backend: + if self.__config_group is None: + backend_name = CONF[self.__pivot] + else: + backend_name = CONF[self.__config_group][self.__pivot] + if backend_name not in self.__backends: + msg = _('Invalid backend: %s') % backend_name + raise exception.RackException(msg) + + backend = self.__backends[backend_name] + if isinstance(backend, tuple): + name = backend[0] + fromlist = backend[1] + else: + name = backend + fromlist = backend + + self.__backend = __import__(name, None, None, fromlist) + return self.__backend + + def __getattr__(self, key): 
+ backend = self.__get_backend() + return getattr(backend, key) + + +def xhtml_escape(value): + """Escapes a string so it is valid within XML or XHTML. + + """ + return saxutils.escape(value, {'"': '"', "'": '''}) + + +def utf8(value): + """Try to turn a string into utf-8 if possible. + + Code is directly from the utf8 function in + http://github.com/facebook/tornado/blob/master/tornado/escape.py + + """ + if isinstance(value, unicode): + return value.encode('utf-8') + elif isinstance(value, gettextutils.Message): + return unicode(value).encode('utf-8') + assert isinstance(value, str) + return value + + +def check_isinstance(obj, cls): + """Checks that obj is of type cls, and lets PyLint infer types.""" + if isinstance(obj, cls): + return obj + raise Exception(_('Expected object of type: %s') % (str(cls))) + + +def parse_server_string(server_str): + """Parses the given server_string and returns a list of host and port. + If it's not a combination of host part and port, the port element + is a null string. If the input is invalid expression, return a null + list. + """ + try: + # First of all, exclude pure IPv6 address (w/o port). + if netaddr.valid_ipv6(server_str): + return (server_str, '') + + # Next, check if this is IPv6 address with a port number combination. + if server_str.find("]:") != -1: + (address, port) = server_str.replace('[', '', 1).split(']:') + return (address, port) + + # Third, check if this is a combination of an address and a port + if server_str.find(':') == -1: + return (server_str, '') + + # This must be a combination of an address and a port + (address, port) = server_str.split(':') + return (address, port) + + except Exception: + LOG.error(_('Invalid server_string: %s'), server_str) + return ('', '') + + +def is_int_like(val): + """Check if a value looks like an int.""" + try: + return str(int(val)) == str(val) + except Exception: + return False + +def is_valid_protocol(protocol): + return protocol in ["tcp", "udp", "icmp"] + +def is_valid_ipv4(address): + """Verify that address represents a valid IPv4 address.""" + try: + return netaddr.valid_ipv4(address) + except Exception: + return False + + +def is_valid_ipv6(address): + try: + return netaddr.valid_ipv6(address) + except Exception: + return False + + +def is_valid_ip_address(address): + return is_valid_ipv4(address) or is_valid_ipv6(address) + + +def is_valid_ipv6_cidr(address): + try: + str(netaddr.IPNetwork(address, version=6).cidr) + return True + except Exception: + return False + + +def get_shortened_ipv6(address): + addr = netaddr.IPAddress(address, version=6) + return str(addr.ipv6()) + + +def get_shortened_ipv6_cidr(address): + net = netaddr.IPNetwork(address, version=6) + return str(net.cidr) + + +def is_valid_cidr(address): + """Check if address is valid + + The provided address can be a IPv6 or a IPv4 + CIDR address. + """ + try: + # Validate the correct CIDR Address + netaddr.IPNetwork(address) + except netaddr.core.AddrFormatError: + return False + except UnboundLocalError: + # NOTE(MotoKen): work around bug in netaddr 0.7.5 (see detail in + # https://github.com/drkjam/netaddr/issues/2) + return False + + # Prior validation partially verify /xx part + # Verify it here + ip_segment = address.split('/') + + if (len(ip_segment) <= 1 or + ip_segment[1] == ''): + return False + + return True + + +def get_ip_version(network): + """Returns the IP version of a network (IPv4 or IPv6). + + Raises AddrFormatError if invalid network. 
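+
+    For example (illustrative): get_ip_version('10.0.0.0/24') returns "IPv4"
+    and get_ip_version('fe80::/64') returns "IPv6".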
+ """ + if netaddr.IPNetwork(network).version == 6: + return "IPv6" + elif netaddr.IPNetwork(network).version == 4: + return "IPv4" + + +def monkey_patch(): + """If the CONF.monkey_patch set as True, + this function patches a decorator + for all functions in specified modules. + You can set decorators for each modules + using CONF.monkey_patch_modules. + The format is "Module path:Decorator function". + Example: + 'rack.api.ec2.cloud:rack.notifications.notify_decorator' + + Parameters of the decorator is as follows. + (See rack.notifications.notify_decorator) + + name - name of the function + function - object of the function + """ + # If CONF.monkey_patch is not True, this function do nothing. + if not CONF.monkey_patch: + return + # Get list of modules and decorators + for module_and_decorator in CONF.monkey_patch_modules: + module, decorator_name = module_and_decorator.split(':') + # import decorator function + decorator = importutils.import_class(decorator_name) + __import__(module) + # Retrieve module information using pyclbr + module_data = pyclbr.readmodule_ex(module) + for key in module_data.keys(): + # set the decorator for the class methods + if isinstance(module_data[key], pyclbr.Class): + clz = importutils.import_class("%s.%s" % (module, key)) + for method, func in inspect.getmembers(clz, inspect.ismethod): + setattr(clz, method, + decorator("%s.%s.%s" % (module, key, method), func)) + # set the decorator for the function + if isinstance(module_data[key], pyclbr.Function): + func = importutils.import_class("%s.%s" % (module, key)) + setattr(sys.modules[module], key, + decorator("%s.%s" % (module, key), func)) + + +def convert_to_list_dict(lst, label): + """Convert a value or list into a list of dicts.""" + if not lst: + return None + if not isinstance(lst, list): + lst = [lst] + return [{label: x} for x in lst] + + +def make_dev_path(dev, partition=None, base='/dev'): + """Return a path to a particular device. + + >>> make_dev_path('xvdc') + /dev/xvdc + + >>> make_dev_path('xvdc', 1) + /dev/xvdc1 + """ + path = os.path.join(base, dev) + if partition: + path += str(partition) + return path + + +def sanitize_hostname(hostname): + """Return a hostname which conforms to RFC-952 and RFC-1123 specs.""" + if isinstance(hostname, unicode): + hostname = hostname.encode('latin-1', 'ignore') + + hostname = re.sub('[ _]', '-', hostname) + hostname = re.sub('[^\w.-]+', '', hostname) + hostname = hostname.lower() + hostname = hostname.strip('.-') + + return hostname + + +def read_cached_file(filename, cache_info, reload_func=None): + """Read from a file if it has been modified. + + :param cache_info: dictionary to hold opaque cache. + :param reload_func: optional function to be called with data when + file is reloaded due to a modification. + + :returns: data from file + + """ + mtime = os.path.getmtime(filename) + if not cache_info or mtime != cache_info.get('mtime'): + LOG.debug(_("Reloading cached file %s") % filename) + with open(filename) as fap: + cache_info['data'] = fap.read() + cache_info['mtime'] = mtime + if reload_func: + reload_func(cache_info['data']) + return cache_info['data'] + + +@contextlib.contextmanager +def temporary_mutation(obj, **kwargs): + """Temporarily set the attr on a particular object to a given value then + revert when finished. 
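+    Both dict-like objects and objects exposing plain attributes are handled.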
+ + One use of this is to temporarily set the read_deleted flag on a context + object: + + with temporary_mutation(context, read_deleted="yes"): + do_something_that_needed_deleted_objects() + """ + def is_dict_like(thing): + return hasattr(thing, 'has_key') + + def get(thing, attr, default): + if is_dict_like(thing): + return thing.get(attr, default) + else: + return getattr(thing, attr, default) + + def set_value(thing, attr, val): + if is_dict_like(thing): + thing[attr] = val + else: + setattr(thing, attr, val) + + def delete(thing, attr): + if is_dict_like(thing): + del thing[attr] + else: + delattr(thing, attr) + + NOT_PRESENT = object() + + old_values = {} + for attr, new_value in kwargs.items(): + old_values[attr] = get(obj, attr, NOT_PRESENT) + set_value(obj, attr, new_value) + + try: + yield + finally: + for attr, old_value in old_values.items(): + if old_value is NOT_PRESENT: + delete(obj, attr) + else: + set_value(obj, attr, old_value) + + +def generate_mac_address(): + """Generate an Ethernet MAC address.""" + # NOTE(vish): We would prefer to use 0xfe here to ensure that linux + # bridge mac addresses don't change, but it appears to + # conflict with libvirt, so we use the next highest octet + # that has the unicast and locally administered bits set + # properly: 0xfa. + # Discussion: https://bugs.launchpad.net/rack/+bug/921838 + mac = [0xfa, 0x16, 0x3e, + random.randint(0x00, 0xff), + random.randint(0x00, 0xff), + random.randint(0x00, 0xff)] + return ':'.join(map(lambda x: "%02x" % x, mac)) + + +def read_file_as_root(file_path): + """Secure helper to read file as root.""" + try: + out, _err = execute('cat', file_path, run_as_root=True) + return out + except processutils.ProcessExecutionError: + raise exception.FileNotFound(file_path=file_path) + + +@contextlib.contextmanager +def temporary_chown(path, owner_uid=None): + """Temporarily chown a path. + + :param owner_uid: UID of temporary owner (defaults to current user) + """ + if owner_uid is None: + owner_uid = os.getuid() + + orig_uid = os.stat(path).st_uid + + if orig_uid != owner_uid: + execute('chown', owner_uid, path, run_as_root=True) + try: + yield + finally: + if orig_uid != owner_uid: + execute('chown', orig_uid, path, run_as_root=True) + + +def chown(path, owner_uid=None): + """chown a path. + + :param owner_uid: UID of owner (defaults to current user) + """ + if owner_uid is None: + owner_uid = os.getuid() + + orig_uid = os.stat(path).st_uid + + if orig_uid != owner_uid: + execute('chown', owner_uid, path, run_as_root=True) + + +@contextlib.contextmanager +def tempdir(**kwargs): + argdict = kwargs.copy() + if 'dir' not in argdict: + argdict['dir'] = CONF.tempdir + tmpdir = tempfile.mkdtemp(**argdict) + try: + yield tmpdir + finally: + try: + shutil.rmtree(tmpdir) + except OSError as e: + LOG.error(_('Could not remove tmpdir: %s'), str(e)) + + +def walk_class_hierarchy(clazz, encountered=None): + """Walk class hierarchy, yielding most derived classes first.""" + if not encountered: + encountered = [] + for subclass in clazz.__subclasses__(): + if subclass not in encountered: + encountered.append(subclass) + # drill down to leaves first + for subsubclass in walk_class_hierarchy(subclass, encountered): + yield subsubclass + yield subclass + + +class UndoManager(object): + """Provides a mechanism to facilitate rolling back a series of actions + when an exception is raised. 
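+
+    Illustrative usage (hypothetical callables, not part of this module):
+
+        undo_mgr = UndoManager()
+        undo_mgr.undo_with(functools.partial(delete_resource, resource_id))
+        ...
+        undo_mgr.rollback_and_reraise(msg="Resource setup failed")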
+ """ + def __init__(self): + self.undo_stack = [] + + def undo_with(self, undo_func): + self.undo_stack.append(undo_func) + + def _rollback(self): + for undo_func in reversed(self.undo_stack): + undo_func() + + def rollback_and_reraise(self, msg=None, **kwargs): + """Rollback a series of actions then re-raise the exception. + + .. note:: (sirp) This should only be called within an + exception handler. + """ + with excutils.save_and_reraise_exception(): + if msg: + LOG.exception(msg, **kwargs) + + self._rollback() + + +def mkfs(fs, path, label=None, run_as_root=False): + """Format a file or block device + + :param fs: Filesystem type (examples include 'swap', 'ext3', 'ext4' + 'btrfs', etc.) + :param path: Path to file or block device to format + :param label: Volume label to use + """ + if fs == 'swap': + args = ['mkswap'] + else: + args = ['mkfs', '-t', fs] + #add -F to force no interactive execute on non-block device. + if fs in ('ext3', 'ext4', 'ntfs'): + args.extend(['-F']) + if label: + if fs in ('msdos', 'vfat'): + label_opt = '-n' + else: + label_opt = '-L' + args.extend([label_opt, label]) + args.append(path) + execute(*args, run_as_root=run_as_root) + + +def last_bytes(file_like_object, num): + """Return num bytes from the end of the file, and remaining byte count. + + :param file_like_object: The file to read + :param num: The number of bytes to return + + :returns (data, remaining) + """ + + try: + file_like_object.seek(-num, os.SEEK_END) + except IOError as e: + if e.errno == 22: + file_like_object.seek(0, os.SEEK_SET) + else: + raise + + remaining = file_like_object.tell() + return (file_like_object.read(), remaining) + + +def metadata_to_dict(metadata): + result = {} + for item in metadata: + if not item.get('deleted'): + result[item['key']] = item['value'] + return result + + +def dict_to_metadata(metadata): + result = [] + for key, value in metadata.iteritems(): + result.append(dict(key=key, value=value)) + return result + + +def instance_meta(instance): + if isinstance(instance['metadata'], dict): + return instance['metadata'] + else: + return metadata_to_dict(instance['metadata']) + + +def instance_sys_meta(instance): + if not instance.get('system_metadata'): + return {} + if isinstance(instance['system_metadata'], dict): + return instance['system_metadata'] + else: + return metadata_to_dict(instance['system_metadata']) + + +def get_wrapped_function(function): + """Get the method at the bottom of a stack of decorators.""" + if not hasattr(function, 'func_closure') or not function.func_closure: + return function + + def _get_wrapped_function(function): + if not hasattr(function, 'func_closure') or not function.func_closure: + return None + + for closure in function.func_closure: + func = closure.cell_contents + + deeper_func = _get_wrapped_function(func) + if deeper_func: + return deeper_func + elif hasattr(closure.cell_contents, '__call__'): + return closure.cell_contents + + return _get_wrapped_function(function) + + +def expects_func_args(*args): + def _decorator_checker(dec): + @functools.wraps(dec) + def _decorator(f): + base_f = get_wrapped_function(f) + arg_names, a, kw, _default = inspect.getargspec(base_f) + if a or kw or set(args) <= set(arg_names): + # NOTE (ndipanov): We can't really tell if correct stuff will + # be passed if it's a function with *args or **kwargs so + # we still carry on and hope for the best + return dec(f) + else: + raise TypeError("Decorated function %(f_name)s does not " + "have the arguments expected by the " + "decorator %(d_name)s" % + 
{'f_name': base_f.__name__, + 'd_name': dec.__name__}) + return _decorator + return _decorator_checker + + +class ExceptionHelper(object): + """Class to wrap another and translate the ClientExceptions raised by its + function calls to the actual ones. + """ + + def __init__(self, target): + self._target = target + + def __getattr__(self, name): + func = getattr(self._target, name) + + @functools.wraps(func) + def wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + except messaging.ExpectedException as e: + raise (e.exc_info[1], None, e.exc_info[2]) + return wrapper + + +def check_string_length(value, name, min_length=0, max_length=None): + """Check the length of specified string + :param value: the value of the string + :param name: the name of the string + :param min_length: the min_length of the string + :param max_length: the max_length of the string + """ + if not isinstance(value, six.string_types): + msg = _("%s is not a string or unicode") % name + raise exception.InvalidInput(message=msg) + + if len(value) < min_length: + msg = _("%(name)s has a minimum character requirement of " + "%(min_length)s.") % {'name': name, 'min_length': min_length} + raise exception.InvalidInput(message=msg) + + if max_length and len(value) > max_length: + msg = _("%(name)s has more than %(max_length)s " + "characters.") % {'name': name, 'max_length': max_length} + raise exception.InvalidInput(message=msg) + + +def validate_integer(value, name, min_value=None, max_value=None): + """Make sure that value is a valid integer, potentially within range.""" + try: + value = int(str(value)) + except (ValueError, UnicodeEncodeError): + msg = _('%(value_name)s must be an integer') + raise exception.InvalidInput(reason=( + msg % {'value_name': name})) + + if min_value is not None: + if value < min_value: + msg = _('%(value_name)s must be >= %(min_value)d') + raise exception.InvalidInput( + reason=(msg % {'value_name': name, + 'min_value': min_value})) + if max_value is not None: + if value > max_value: + msg = _('%(value_name)s must be <= %(max_value)d') + raise exception.InvalidInput( + reason=( + msg % {'value_name': name, + 'max_value': max_value}) + ) + return value + + +def spawn_n(func, *args, **kwargs): + """Passthrough method for eventlet.spawn_n. + + This utility exists so that it can be stubbed for testing without + interfering with the service spawns. + """ + eventlet.spawn_n(func, *args, **kwargs) + + +def is_none_string(val): + """Check if a string represents a None value. 
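+
+    Only the literal string 'none' matches, case-insensitively; non-string
+    values always return False.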
+ """ + if not isinstance(val, six.string_types): + return False + + return val.lower() == 'none' + + +def convert_version_to_int(version): + try: + if isinstance(version, six.string_types): + version = convert_version_to_tuple(version) + if isinstance(version, tuple): + return reduce(lambda x, y: (x * 1000) + y, version) + except Exception: + raise exception.RackException(message="Hypervisor version invalid.") + + +def convert_version_to_str(version_int): + version_numbers = [] + factor = 1000 + while version_int != 0: + version_number = version_int - (version_int // factor * factor) + version_numbers.insert(0, str(version_number)) + version_int = version_int / factor + + return reduce(lambda x, y: "%s.%s" % (x, y), version_numbers) + + +def convert_version_to_tuple(version_str): + return tuple(int(part) for part in version_str.split('.')) + +''' +def is_neutron(): + global _IS_NEUTRON + + if _IS_NEUTRON is not None: + return _IS_NEUTRON + + try: + # compatibility with Folsom/Grizzly configs + cls_name = CONF.network_api_class + if cls_name == 'rack.network.quantumv2.api.API': + cls_name = 'rack.network.neutronv2.api.API' + + from rack.network.neutronv2 import api as neutron_api + _IS_NEUTRON = issubclass(importutils.import_class(cls_name), + neutron_api.API) + except ImportError: + _IS_NEUTRON = False + + return _IS_NEUTRON +''' + +def reset_is_neutron(): + global _IS_NEUTRON + _IS_NEUTRON = None + + +def is_auto_disk_config_disabled(auto_disk_config_raw): + auto_disk_config_disabled = False + if auto_disk_config_raw is not None: + adc_lowered = auto_disk_config_raw.strip().lower() + if adc_lowered == "disabled": + auto_disk_config_disabled = True + return auto_disk_config_disabled + + +def get_auto_disk_config_from_instance(instance=None, sys_meta=None): + if sys_meta is None: + sys_meta = instance_sys_meta(instance) + return sys_meta.get("image_auto_disk_config") + + +def get_auto_disk_config_from_image_props(image_properties): + return image_properties.get("auto_disk_config") + + +def get_system_metadata_from_image(image_meta, flavor=None): + system_meta = {} + prefix_format = SM_IMAGE_PROP_PREFIX + '%s' + + for key, value in image_meta.get('properties', {}).iteritems(): + new_value = unicode(value)[:255] + system_meta[prefix_format % key] = new_value + + for key in SM_INHERITABLE_KEYS: + value = image_meta.get(key) + + if key == 'min_disk' and flavor: + if image_meta.get('disk_format') == 'vhd': + value = flavor['root_gb'] + else: + value = max(value, flavor['root_gb']) + + if value is None: + continue + + system_meta[prefix_format % key] = value + + return system_meta + + +def get_image_from_system_metadata(system_meta): + image_meta = {} + properties = {} + + if not isinstance(system_meta, dict): + system_meta = metadata_to_dict(system_meta) + + for key, value in system_meta.iteritems(): + if value is None: + continue + + # NOTE(xqueralt): Not sure this has to inherit all the properties or + # just the ones we need. Leaving it for now to keep the old behaviour. 
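+        # Keys carrying the image-property prefix are folded back into the
+        # image dict, e.g. (illustrative, assuming SM_IMAGE_PROP_PREFIX is
+        # 'image_' and 'min_ram' is inheritable): 'image_min_ram' ->
+        # image_meta['min_ram'], 'image_kernel_id' -> properties['kernel_id'].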
+ if key.startswith(SM_IMAGE_PROP_PREFIX): + key = key[len(SM_IMAGE_PROP_PREFIX):] + + if key in SM_INHERITABLE_KEYS: + image_meta[key] = value + else: + # Skip properties that are non-inheritable + if key in CONF.non_inheritable_image_properties: + continue + properties[key] = value + + if properties: + image_meta['properties'] = properties + + return image_meta + + +def get_hash_str(base_str): + """returns string that represents hash of base_str (in hex format).""" + return hashlib.md5(base_str).hexdigest() + + +def cpu_count(): + try: + return multiprocessing.cpu_count() + except NotImplementedError: + return 1 diff --git a/rack/version.py b/rack/version.py new file mode 100644 index 0000000..00282d0 --- /dev/null +++ b/rack/version.py @@ -0,0 +1,91 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pbr.version + +from rack.openstack.common.gettextutils import _ + +RACK_VENDOR = "OpenStack Foundation" +RACK_PRODUCT = "OpenStack Rack" +RACK_PACKAGE = None # OS distro package version suffix + +loaded = False +version_info = pbr.version.VersionInfo('rack') +version_string = version_info.version_string + + +def _load_config(): + # Don't load in global context, since we can't assume + # these modules are accessible when distutils uses + # this module + import ConfigParser + + from oslo.config import cfg + + from rack.openstack.common import log as logging + + global loaded, RACK_VENDOR, RACK_PRODUCT, RACK_PACKAGE + if loaded: + return + + loaded = True + + cfgfile = cfg.CONF.find_file("release") + if cfgfile is None: + return + + try: + cfg = ConfigParser.RawConfigParser() + cfg.read(cfgfile) + + RACK_VENDOR = cfg.get("Rack", "vendor") + if cfg.has_option("Rack", "vendor"): + RACK_VENDOR = cfg.get("Rack", "vendor") + + RACK_PRODUCT = cfg.get("Rack", "product") + if cfg.has_option("Rack", "product"): + RACK_PRODUCT = cfg.get("Rack", "product") + + RACK_PACKAGE = cfg.get("Rack", "package") + if cfg.has_option("Rack", "package"): + RACK_PACKAGE = cfg.get("Rack", "package") + except Exception as ex: + LOG = logging.getLogger(__name__) + LOG.error(_("Failed to load %(cfgfile)s: %(ex)s"), + {'cfgfile': cfgfile, 'ex': ex}) + + +def vendor_string(): + _load_config() + + return RACK_VENDOR + + +def product_string(): + _load_config() + + return RACK_PRODUCT + + +def package_string(): + _load_config() + + return RACK_PACKAGE + + +def version_string_with_package(): + if package_string() is None: + return version_info.version_string() + else: + return "%s-%s" % (version_info.version_string(), package_string()) diff --git a/rack/wsgi.py b/rack/wsgi.py new file mode 100644 index 0000000..7cafe82 --- /dev/null +++ b/rack/wsgi.py @@ -0,0 +1,502 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utility methods for working with WSGI servers.""" + +from __future__ import print_function + +import os.path +import socket +import sys + +import eventlet +import eventlet.wsgi +import greenlet +from oslo.config import cfg +from paste import deploy +import routes.middleware +import ssl +import webob.dec +import webob.exc + +from rack import exception +from rack.openstack.common import excutils +from rack.openstack.common.gettextutils import _ +from rack.openstack.common import log as logging + +wsgi_opts = [ + cfg.StrOpt('api_paste_config', + default="api-paste.ini", + help='File name for the paste.deploy config for rack-api'), + cfg.StrOpt('wsgi_log_format', + default='%(client_ip)s "%(request_line)s" status: %(status_code)s' + ' len: %(body_length)s time: %(wall_seconds).7f', + help='A python format string that is used as the template to ' + 'generate log lines. The following values can be formatted ' + 'into it: client_ip, date_time, request_line, status_code, ' + 'body_length, wall_seconds.'), + cfg.StrOpt('ssl_ca_file', + help="CA certificate file to use to verify " + "connecting clients"), + cfg.StrOpt('ssl_cert_file', + help="SSL certificate of API server"), + cfg.StrOpt('ssl_key_file', + help="SSL private key of API server"), + cfg.IntOpt('tcp_keepidle', + default=600, + help="Sets the value of TCP_KEEPIDLE in seconds for each " + "server socket. Not supported on OS X."), + cfg.IntOpt('wsgi_default_pool_size', + default=1000, + help="Size of the pool of greenthreads used by wsgi"), + cfg.IntOpt('max_header_line', + default=16384, + help="Maximum line size of message headers to be accepted. " + "max_header_line may need to be increased when using " + "large tokens (typically those generated by the " + "Keystone v3 API with big service catalogs)."), + ] +CONF = cfg.CONF +CONF.register_opts(wsgi_opts) + +LOG = logging.getLogger(__name__) + + +class Server(object): + """Server class to manage a WSGI server, serving a WSGI application.""" + + default_pool_size = CONF.wsgi_default_pool_size + + def __init__(self, name, app, host='0.0.0.0', port=0, pool_size=None, + protocol=eventlet.wsgi.HttpProtocol, backlog=128, + use_ssl=False, max_url_len=None): + """Initialize, but do not start, a WSGI server. + + :param name: Pretty name for logging. + :param app: The WSGI application to serve. + :param host: IP address to serve the application. + :param port: Port number to server the application. + :param pool_size: Maximum number of eventlets to spawn concurrently. + :param backlog: Maximum number of queued connections. + :param max_url_len: Maximum length of permitted URLs. + :returns: None + :raises: rack.exception.InvalidInput + """ + # Allow operators to customize http requests max header line size. 
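+        # Note that eventlet.wsgi.MAX_HEADER_LINE is a module-level setting,
+        # so it applies to every WSGI server running in this process.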
+ eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line + self.name = name + self.app = app + self._server = None + self._protocol = protocol + self._pool = eventlet.GreenPool(pool_size or self.default_pool_size) + self._logger = logging.getLogger("rack.%s.wsgi.server" % self.name) + self._wsgi_logger = logging.WritableLogger(self._logger) + self._use_ssl = use_ssl + self._max_url_len = max_url_len + + if backlog < 1: + raise exception.InvalidInput( + reason='The backlog must be more than 1') + + bind_addr = (host, port) + # TODO(dims): eventlet's green dns/socket module does not actually + # support IPv6 in getaddrinfo(). We need to get around this in the + # future or monitor upstream for a fix + try: + info = socket.getaddrinfo(bind_addr[0], + bind_addr[1], + socket.AF_UNSPEC, + socket.SOCK_STREAM)[0] + family = info[0] + bind_addr = info[-1] + except Exception: + family = socket.AF_INET + + try: + self._socket = eventlet.listen(bind_addr, family, backlog=backlog) + except EnvironmentError: + LOG.error(_("Could not bind to %(host)s:%(port)s"), + {'host': host, 'port': port}) + raise + + (self.host, self.port) = self._socket.getsockname()[0:2] + LOG.info(_("%(name)s listening on %(host)s:%(port)s") % self.__dict__) + + def start(self): + """Start serving a WSGI application. + + :returns: None + """ + if self._use_ssl: + try: + ca_file = CONF.ssl_ca_file + cert_file = CONF.ssl_cert_file + key_file = CONF.ssl_key_file + + if cert_file and not os.path.exists(cert_file): + raise RuntimeError( + _("Unable to find cert_file : %s") % cert_file) + + if ca_file and not os.path.exists(ca_file): + raise RuntimeError( + _("Unable to find ca_file : %s") % ca_file) + + if key_file and not os.path.exists(key_file): + raise RuntimeError( + _("Unable to find key_file : %s") % key_file) + + if self._use_ssl and (not cert_file or not key_file): + raise RuntimeError( + _("When running server in SSL mode, you must " + "specify both a cert_file and key_file " + "option value in your configuration file")) + ssl_kwargs = { + 'server_side': True, + 'certfile': cert_file, + 'keyfile': key_file, + 'cert_reqs': ssl.CERT_NONE, + } + + if CONF.ssl_ca_file: + ssl_kwargs['ca_certs'] = ca_file + ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED + + self._socket = eventlet.wrap_ssl(self._socket, + **ssl_kwargs) + + self._socket.setsockopt(socket.SOL_SOCKET, + socket.SO_REUSEADDR, 1) + # sockets can hang around forever without keepalive + self._socket.setsockopt(socket.SOL_SOCKET, + socket.SO_KEEPALIVE, 1) + + # This option isn't available in the OS X version of eventlet + if hasattr(socket, 'TCP_KEEPIDLE'): + self._socket.setsockopt(socket.IPPROTO_TCP, + socket.TCP_KEEPIDLE, + CONF.tcp_keepidle) + + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_("Failed to start %(name)s on %(host)s" + ":%(port)s with SSL support") % self.__dict__) + + wsgi_kwargs = { + 'func': eventlet.wsgi.server, + 'sock': self._socket, + 'site': self.app, + 'protocol': self._protocol, + 'custom_pool': self._pool, + 'log': self._wsgi_logger, + 'log_format': CONF.wsgi_log_format, + 'debug': False + } + + if self._max_url_len: + wsgi_kwargs['url_length_limit'] = self._max_url_len + + self._server = eventlet.spawn(**wsgi_kwargs) + + def stop(self): + """Stop this server. + + This is not a very nice action, as currently the method by which a + server is stopped is by killing its eventlet. 
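+        Callers should follow up with wait() to block until the killed
+        greenthread has actually exited.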
+ + :returns: None + + """ + LOG.info(_("Stopping WSGI server.")) + + if self._server is not None: + # Resize pool to stop new requests from being processed + self._pool.resize(0) + self._server.kill() + + def wait(self): + """Block, until the server has stopped. + + Waits on the server's eventlet to finish, then returns. + + :returns: None + + """ + try: + if self._server is not None: + self._server.wait() + except greenlet.GreenletExit: + LOG.info(_("WSGI server has stopped.")) + + +class Request(webob.Request): + pass + + +class Application(object): + """Base WSGI application wrapper. Subclasses need to implement __call__.""" + + @classmethod + def factory(cls, global_config, **local_config): + """Used for paste app factories in paste.deploy config files. + + Any local configuration (that is, values under the [app:APPNAME] + section of the paste config) will be passed into the `__init__` method + as kwargs. + + A hypothetical configuration would look like: + + [app:wadl] + latest_version = 1.3 + paste.app_factory = rack.api.fancy_api:Wadl.factory + + which would result in a call to the `Wadl` class as + + import rack.api.fancy_api + fancy_api.Wadl(latest_version='1.3') + + You could of course re-implement the `factory` method in subclasses, + but using the kwarg passing it shouldn't be necessary. + + """ + return cls(**local_config) + + def __call__(self, environ, start_response): + r"""Subclasses will probably want to implement __call__ like this: + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, req): + # Any of the following objects work as responses: + + # Option 1: simple string + res = 'message\n' + + # Option 2: a nicely formatted HTTP exception page + res = exc.HTTPForbidden(explanation='Nice try') + + # Option 3: a webob Response object (in case you need to play with + # headers, or you want to be treated like an iterable, or or or) + res = Response(); + res.app_iter = open('somefile') + + # Option 4: any wsgi app to be run next + res = self.application + + # Option 5: you can get a Response object for a wsgi app, too, to + # play with headers etc + res = req.get_response(self.application) + + # You can then just return your response... + return res + # ... or set req.response and return None. + req.response = res + + See the end of http://pythonpaste.org/webob/modules/dec.html + for more info. + + """ + raise NotImplementedError(_('You must implement __call__')) + + +class Middleware(Application): + """Base WSGI middleware. + + These classes require an application to be + initialized that will be called next. By default the middleware will + simply call its wrapped app, or you can override __call__ to customize its + behavior. + + """ + + @classmethod + def factory(cls, global_config, **local_config): + """Used for paste app factories in paste.deploy config files. + + Any local configuration (that is, values under the [filter:APPNAME] + section of the paste config) will be passed into the `__init__` method + as kwargs. + + A hypothetical configuration would look like: + + [filter:analytics] + redis_host = 127.0.0.1 + paste.filter_factory = rack.api.analytics:Analytics.factory + + which would result in a call to the `Analytics` class as + + import rack.api.analytics + analytics.Analytics(app_from_paste, redis_host='127.0.0.1') + + You could of course re-implement the `factory` method in subclasses, + but using the kwarg passing it shouldn't be necessary. 
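+
+        Unlike Application.factory, this returns a closure, because paste
+        filter factories are handed the wrapped application to construct the
+        middleware with, rather than building it directly.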
+ + """ + def _factory(app): + return cls(app, **local_config) + return _factory + + def __init__(self, application): + self.application = application + + def process_request(self, req): + """Called on each request. + + If this returns None, the next application down the stack will be + executed. If it returns a response then that response will be returned + and execution will stop here. + + """ + return None + + def process_response(self, response): + """Do whatever you'd like to the response.""" + return response + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, req): + response = self.process_request(req) + if response: + return response + response = req.get_response(self.application) + return self.process_response(response) + + +class Debug(Middleware): + """Helper class for debugging a WSGI application. + + Can be inserted into any WSGI application chain to get information + about the request and response. + + """ + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, req): + print(('*' * 40) + ' REQUEST ENVIRON') + for key, value in req.environ.items(): + print(key, '=', value) + print() + resp = req.get_response(self.application) + + print(('*' * 40) + ' RESPONSE HEADERS') + for (key, value) in resp.headers.iteritems(): + print(key, '=', value) + print() + + resp.app_iter = self.print_generator(resp.app_iter) + + return resp + + @staticmethod + def print_generator(app_iter): + """Iterator that prints the contents of a wrapper string.""" + print(('*' * 40) + ' BODY') + for part in app_iter: + sys.stdout.write(part) + sys.stdout.flush() + yield part + print() + + +class Router(object): + """WSGI middleware that maps incoming requests to WSGI apps.""" + + def __init__(self, mapper): + """Create a router for the given routes.Mapper. + + Each route in `mapper` must specify a 'controller', which is a + WSGI app to call. You'll probably want to specify an 'action' as + well and have your controller be an object that can route + the request to the action-specific method. + + Examples: + mapper = routes.Mapper() + sc = ServerController() + + # Explicit mapping of one route to a controller+action + mapper.connect(None, '/svrlist', controller=sc, action='list') + + # Actions are all implicitly defined + mapper.resource('server', 'servers', controller=sc) + + # Pointing to an arbitrary WSGI app. You can specify the + # {path_info:.*} parameter so the target app can be handed just that + # section of the URL. + mapper.connect(None, '/v1.0/{path_info:.*}', controller=BlogApp()) + + """ + self.map = mapper + self._router = routes.middleware.RoutesMiddleware(self._dispatch, + self.map) + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, req): + """Route the incoming request to a controller based on self.map. + + If no match, return a 404. + + """ + return self._router + + @staticmethod + @webob.dec.wsgify(RequestClass=Request) + def _dispatch(req): + """Dispatch the request to the appropriate controller. + + Called by self._router after matching the incoming request to a route + and putting the information into req.environ. Either returns 404 + or the routed WSGI app's response. + + """ + match = req.environ['wsgiorg.routing_args'][1] + if not match: + return webob.exc.HTTPNotFound() + app = match['controller'] + return app + + +class Loader(object): + """Used to load WSGI applications from paste configurations.""" + + def __init__(self, config_path=None): + """Initialize the loader, and attempt to find the config. 
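+
+        A relative config_path is resolved with CONF.find_file(); an absolute
+        path is used as given, provided it exists.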
+ + :param config_path: Full or relative path to the paste config. + :returns: None + + """ + self.config_path = None + + config_path = config_path or CONF.api_paste_config + if not os.path.isabs(config_path): + self.config_path = CONF.find_file(config_path) + elif os.path.exists(config_path): + self.config_path = config_path + + if not self.config_path: + raise exception.ConfigNotFound(path=config_path) + + def load_app(self, name): + """Return the paste URLMap wrapped WSGI application. + + :param name: Name of the application to load. + :returns: Paste URLMap object wrapping the requested application. + :raises: `rack.exception.PasteAppNotFound` + + """ + try: + LOG.debug(_("Loading app %(name)s from %(path)s") % + {'name': name, 'path': self.config_path}) + return deploy.loadapp("config:%s" % self.config_path, name=name) + except LookupError as err: + LOG.error(err) + raise exception.PasteAppNotFound(name=name, path=self.config_path) diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..44094ba --- /dev/null +++ b/requirements.txt @@ -0,0 +1,36 @@ +pbr>=0.6,<1.0 +SQLAlchemy>=0.7.8,<=0.9.99 +amqplib>=0.6.1 +anyjson>=0.3.3 +argparse +boto>=2.12.0,!=2.13.0 +eventlet>=0.13.0 +Jinja2 +kombu>=2.4.8 +lxml>=2.3 +Routes>=1.12.3 +WebOb>=1.2.3 +greenlet>=0.3.2 +PasteDeploy>=1.5.0 +Paste +sqlalchemy-migrate>=0.8.2,!=0.8.4 +netaddr>=0.7.6 +suds>=0.4 +paramiko>=1.9.0 +pyasn1 +Babel>=1.3 +iso8601>=0.1.9 +jsonschema>=2.0.0,<3.0.0 +python-cinderclient>=1.0.6 +python-neutronclient>=2.3.4,<3 +python-glanceclient>=0.9.0 +python-keystoneclient>=0.7.0 +python-novaclient +six>=1.5.2 +stevedore>=0.14 +websockify>=0.5.1,<0.6 +wsgiref>=0.1.2 +oslo.config>=1.2.0 +oslo.rootwrap +pycadf>=0.4.1 +oslo.messaging>=1.3.0a9 diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000..9bbc0bf --- /dev/null +++ b/setup.cfg @@ -0,0 +1,29 @@ +[metadata] +name = rack +version = 0.1.0 +summary = Real Application Centric Kernel +description-file = README.rst +author = OpenStack +author-email = openstack-dev@lists.openstack.org +home-page = https://wiki.openstack.org/wiki/RACK +classifier = + Environment :: OpenStack + Intended Audience :: Information Technology + License :: OSI Approved :: Apache Software License + Operating System :: POSIX :: Linux + Programming Language :: Python + +[global] +setup-hooks = + pbr.hooks.setup_hook + +[files] +packages = + rack + +[entry_points] +console_scripts = + rack-api = rack.cmd.api:main + rack-scheduler = rack.cmd.scheduler:main + rack-resourceoperator = rack.cmd.resourceoperator:main + diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..9ea4d88 --- /dev/null +++ b/setup.py @@ -0,0 +1,18 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
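+
+# pbr reads the project metadata and entry points from setup.cfg, so this
+# file intentionally stays minimal.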
+import setuptools + +setuptools.setup( + setup_requires=['pbr'], + pbr=True) diff --git a/test-requirements.txt b/test-requirements.txt new file mode 100644 index 0000000..8a8079f --- /dev/null +++ b/test-requirements.txt @@ -0,0 +1,15 @@ +hacking>=0.8.0,<0.9 +coverage>=3.6 +discover +feedparser +fixtures>=0.3.14 +mock>=1.0 +mox>=0.5.3 +MySQL-python +psycopg2 +pylint==0.25.2 +python-subunit>=0.0.18 +sphinx>=1.1.2,<1.2 +oslosphinx +testrepository>=0.0.18 +testtools>=0.9.34 diff --git a/tools/ansible-openstack/README.md b/tools/ansible-openstack/README.md new file mode 100644 index 0000000..cacbb9e --- /dev/null +++ b/tools/ansible-openstack/README.md @@ -0,0 +1,67 @@ +OpenStack-Ansible +================= + +# Description +This repository contains playbooks for installation of OpenStack Icehouse for CentOS. +These playbooks will.. +* install necessary packages +* install MySQL, AMQP +* install Keystone, Glance, Cinder +* install Neutron (with linunxbridge, vlan, ml2 configuration) +* install Nova, Horizon +* setup provider network + +These playbooks are based on [yosshy/openstack-ansible](https://github.com/yosshy/openstack-ansible) and also [utilizing openstack-ansible-modules](https://github.com/openstack-ansible/openstack-ansible-modules). + + +# Requirements +* Ansible 1.6 or later +* CentOS 6.5 or later +* Internet accessible network + +# Assumptions of host network +We assume following three networks for OpenStack hosts. +* External network +A network end-users access from the Internet to interact virtual instances, dashboard and api. +Also employed to network-gateway node's external link. + +* Internal network +OpenStack components talk to each other via this network. + +* Management network +Ansible accesses OpenStack hosts via this network. + +# Before Running +### automatic ssh login +You can simply copy ssh public key to the remote node with following command. + ssh-copy-id root@ +Also specify ssh private key in ansible.cfg file. + private_key=/root/.ssh/id_rsa + ask_pass = False +### manual ssh login(unrecommended) +Or if you don't like ssh key, comment out private_key_file=** and change ask_pass=True in ansible.cfg file. + # private_key_file=** + ask_pass = True + +### role of each OpenStack nodes +Determine the role of each OpenStack nodes in openstack_hosts file + * frontend: API, horizon + * controller: nova-{conductor,scheduler,etc}, glance-registry, cinder-scheduler, etc. + * network_gateway: all neutron services except neutron-server + * volume_backend: used for cinder-volume and its LVM-based backend + * sql_backend: used for mySql + * amqp_backend: used for AMQP + * compute_backend: nova-compute, neutron-agent and KVM + +### system/openstack settings +Edit system/OpenStack settings in group_vars/all file. +You can set passwords, cinder volume to use, VLAN range, provider network details, etc. in this file. + +### interface mapping +Determine interface mapping of External/Internal/Management for each role nodes. +Edit group_vars/{compute_backend|controller|frontend|network_gateway|volume_backend|sql_backend} and specify which interface to use for which network. + + + + + \ No newline at end of file diff --git a/tools/ansible-openstack/ansible.cfg b/tools/ansible-openstack/ansible.cfg new file mode 100644 index 0000000..42b7aac --- /dev/null +++ b/tools/ansible-openstack/ansible.cfg @@ -0,0 +1,30 @@ +[defaults] + +# inventory file +hostfile = openstack_hosts + +# i don't know what it is. 
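+# (filter_plugins points Ansible at a directory of custom Jinja2 filter
+# plugins used by these playbooks.)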
+filter_plugins = filter_plugins + +# remote user for ansible to use during setup +remote_user = root + +# SSH private key to use for accessing remote servers. +# Please make sure that the corresponding public key is installed in remote servers. +private_key_file = /var/lib/jenkins/.ssh/id_rsa + +# You'd better not ask_pass for jenkins +ask_pass = False +#ask_sudo_pass = True + +# uncomment this to disable SSH key host checking +host_key_checking = False + +# SSH timeout +timeout = 20 + +# default serial +forks = 10 + +# log +log_path=log.txt diff --git a/tools/ansible-openstack/group_vars/all b/tools/ansible-openstack/group_vars/all new file mode 100644 index 0000000..67ed9fc --- /dev/null +++ b/tools/ansible-openstack/group_vars/all @@ -0,0 +1,126 @@ +--- +######################### system settings ######################### +openstack_kernel: kernel-2.6.32-358.123.2.openstack.el6.x86_64 +latest_dnsmasq: ftp://fr2.rpmfind.net/linux/dag/redhat/el6/en/x86_64/extras/RPMS/dnsmasq-2.65-1.el6.rfx.x86_64.rpm +ntp_server: 192.168.100.254 +use_your_own_repository: false + +# IP mapping. +frontend_int_ip: "{{ hostvars[groups['frontend'][0]]['my_int_ip'] }}" +frontend_ext_ip: "{{ hostvars[groups['frontend'][0]]['my_ext_ip'] }}" +amqp_host: "{{ hostvars[groups['amqp_backend'][0]]['my_int_ip'] }}" +sql_host: "{{ hostvars[groups['sql_backend'][0]]['my_int_ip'] }}" +glance_registry_host: "{{ hostvars[groups['controller'][0]]['my_int_ip'] }}" +cinder_volume_host: "{{ hostvars[groups['volume_backend'][0]]['my_int_ip'] }}" +nova_metadata_host: "{{ hostvars[groups['frontend'][0]]['my_int_ip'] }}" +vnc_host: "{{ hostvars[groups['controller'][0]]['my_ext_ip'] }}" + + + +######################### OpenStack settings ######################### +log_debug: False +log_verbose: True + + + +#--------- AMQP ---------# +# rabbit settings +amqp_erlang_port: 9100 +amqp_user: admin +amqp_pass: AMQP_PASSWORD + + +#--------- Keystone ---------# +# Endpoint URL +keystone_public_url: http://{{ frontend_ext_ip }}:5000/v2.0 +keystone_internal_url: http://{{ frontend_int_ip }}:5000/v2.0 +keystone_admin_url: http://{{ frontend_int_ip }}:35357/v2.0 +glance_public_url: http://{{ frontend_ext_ip }}:9292 +glance_internal_url: http://{{ frontend_int_ip }}:9292 +glance_admin_url: http://{{ frontend_int_ip }}:9292 +cinder_public_url: http://{{ frontend_ext_ip }}:8776/v1/%(tenant_id)s +cinder_internal_url: http://{{ frontend_int_ip }}:8776/v1/%(tenant_id)s +cinder_admin_url: http://{{ frontend_int_ip }}:8776/v1/%(tenant_id)s +cinderv2_public_url: http://{{ frontend_ext_ip }}:8776/v2/%(tenant_id)s +cinderv2_internal_url: http://{{ frontend_int_ip }}:8776/v2/%(tenant_id)s +cinderv2_admin_url: http://{{ frontend_int_ip }}:8776/v2/%(tenant_id)s +neutron_public_url: http://{{ frontend_ext_ip }}:9696 +neutron_internal_url: http://{{ frontend_int_ip }}:9696 +neutron_admin_url: http://{{ frontend_int_ip }}:9696 +nova_public_url: http://{{ frontend_ext_ip }}:8774/v2/%(tenant_id)s +nova_internal_url: http://{{ frontend_int_ip }}:8774/v2/%(tenant_id)s +nova_admin_url: http://{{ frontend_int_ip }}:8774/v2/%(tenant_id)s +ec2_public_url: http://{{ frontend_ext_ip }}:8773/services/Cloud +ec2_internal_url: http://{{ frontend_int_ip }}:8773/services/Cloud +ec2_admin_url: http://{{ frontend_int_ip }}:8773/services/Admin + +# passwords +heartbeat_secret_key: PASSWORD +root_db_password: PASSWORD +keystone_db_password: PASSWORD +glance_db_password: PASSWORD +nova_db_password: PASSWORD +neutron_db_password: PASSWORD +cinder_db_password: PASSWORD 
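+
+# Keystone service-user passwords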
+glance_identity_password: PASSWORD +nova_identity_password: PASSWORD +ec2_identity_password: PASSWORD +swift_identity_password: PASSWORD +neutron_identity_password: PASSWORD +cinder_identity_password: PASSWORD + +# Admin settings +openstack_region: RegionOne +admin_token: 012345SECRET99TOKEN012345 +admin_user: admin +admin_password: PASSWORD +admin_tenant: admin +service_tenant: services + + +#--------- Glance ---------# +glance_images: + - name: cirros-0.3.0-x86_64 + url: https://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img + disk_format: qcow2 + - name: cent6.5-20140606 + url: http://192.168.100.253/norarepo/misc/openstack-images/CentOS-6.5.x86_64-ci-heat-20140606.qcow2 + disk_format: qcow2 + +#--------- Cinder ---------# +cinder_volume_dev: /dev/loop0 +cinder_volume: cinder-volume01 + + +#--------- Neutron ---------# +# network parameters +network_gateway: 192.168.100.254 +network_dns: 192.168.100.254 + +# network setting (LinuxBridge, ML2) +VLAN_RANGE: '701:799' + +# provider network +provider_nw_name: publicNW +provider_network_type: flat + +# provider subnetwork +provider_subnet_name: publicSubNet +provider_gateway_ip: 192.168.100.254 +provider_allocation_pool_start: 192.168.100.130 +provider_allocation_pool_end: 192.168.100.139 +provider_cidr: 192.168.100.0/24 +provider_enable_dhcp: 0 + +#--------- Nova ---------# +virt_type: kvm + +# set below if you use multiple frontend/controller +mcast_address_controller: 239.0.0.1 +mcast_address_frontend: 239.0.0.2 + + + +# Automatically generated variables + + diff --git a/tools/ansible-openstack/group_vars/amqp_backend b/tools/ansible-openstack/group_vars/amqp_backend new file mode 100644 index 0000000..a8a5667 --- /dev/null +++ b/tools/ansible-openstack/group_vars/amqp_backend @@ -0,0 +1,11 @@ +my_int_if: eth1 +my_ext_if: eth2 +my_mng_if: eth3 + +my_int_ip: "{{ ansible_eth1.ipv4.address }}" +my_ext_ip: "{{ ansible_eth2.ipv4.address }}" +my_mng_ip: "{{ ansible_eth3.ipv4.address }}" + +my_int_obj: "{{ ansible_eth1 }}" +my_ext_obj: "{{ ansible_eth2 }}" +my_mng_obj: "{{ ansible_eth3 }}" diff --git a/tools/ansible-openstack/group_vars/compute_backend b/tools/ansible-openstack/group_vars/compute_backend new file mode 100644 index 0000000..a8a5667 --- /dev/null +++ b/tools/ansible-openstack/group_vars/compute_backend @@ -0,0 +1,11 @@ +my_int_if: eth1 +my_ext_if: eth2 +my_mng_if: eth3 + +my_int_ip: "{{ ansible_eth1.ipv4.address }}" +my_ext_ip: "{{ ansible_eth2.ipv4.address }}" +my_mng_ip: "{{ ansible_eth3.ipv4.address }}" + +my_int_obj: "{{ ansible_eth1 }}" +my_ext_obj: "{{ ansible_eth2 }}" +my_mng_obj: "{{ ansible_eth3 }}" diff --git a/tools/ansible-openstack/group_vars/controller b/tools/ansible-openstack/group_vars/controller new file mode 100644 index 0000000..a8a5667 --- /dev/null +++ b/tools/ansible-openstack/group_vars/controller @@ -0,0 +1,11 @@ +my_int_if: eth1 +my_ext_if: eth2 +my_mng_if: eth3 + +my_int_ip: "{{ ansible_eth1.ipv4.address }}" +my_ext_ip: "{{ ansible_eth2.ipv4.address }}" +my_mng_ip: "{{ ansible_eth3.ipv4.address }}" + +my_int_obj: "{{ ansible_eth1 }}" +my_ext_obj: "{{ ansible_eth2 }}" +my_mng_obj: "{{ ansible_eth3 }}" diff --git a/tools/ansible-openstack/group_vars/frontend b/tools/ansible-openstack/group_vars/frontend new file mode 100644 index 0000000..a8a5667 --- /dev/null +++ b/tools/ansible-openstack/group_vars/frontend @@ -0,0 +1,11 @@ +my_int_if: eth1 +my_ext_if: eth2 +my_mng_if: eth3 + +my_int_ip: "{{ ansible_eth1.ipv4.address }}" +my_ext_ip: "{{ ansible_eth2.ipv4.address }}" +my_mng_ip: 
"{{ ansible_eth3.ipv4.address }}" + +my_int_obj: "{{ ansible_eth1 }}" +my_ext_obj: "{{ ansible_eth2 }}" +my_mng_obj: "{{ ansible_eth3 }}" diff --git a/tools/ansible-openstack/group_vars/network_gateway b/tools/ansible-openstack/group_vars/network_gateway new file mode 100644 index 0000000..a8a5667 --- /dev/null +++ b/tools/ansible-openstack/group_vars/network_gateway @@ -0,0 +1,11 @@ +my_int_if: eth1 +my_ext_if: eth2 +my_mng_if: eth3 + +my_int_ip: "{{ ansible_eth1.ipv4.address }}" +my_ext_ip: "{{ ansible_eth2.ipv4.address }}" +my_mng_ip: "{{ ansible_eth3.ipv4.address }}" + +my_int_obj: "{{ ansible_eth1 }}" +my_ext_obj: "{{ ansible_eth2 }}" +my_mng_obj: "{{ ansible_eth3 }}" diff --git a/tools/ansible-openstack/group_vars/sql_backend b/tools/ansible-openstack/group_vars/sql_backend new file mode 100644 index 0000000..a8a5667 --- /dev/null +++ b/tools/ansible-openstack/group_vars/sql_backend @@ -0,0 +1,11 @@ +my_int_if: eth1 +my_ext_if: eth2 +my_mng_if: eth3 + +my_int_ip: "{{ ansible_eth1.ipv4.address }}" +my_ext_ip: "{{ ansible_eth2.ipv4.address }}" +my_mng_ip: "{{ ansible_eth3.ipv4.address }}" + +my_int_obj: "{{ ansible_eth1 }}" +my_ext_obj: "{{ ansible_eth2 }}" +my_mng_obj: "{{ ansible_eth3 }}" diff --git a/tools/ansible-openstack/group_vars/volume_backend b/tools/ansible-openstack/group_vars/volume_backend new file mode 100644 index 0000000..a8a5667 --- /dev/null +++ b/tools/ansible-openstack/group_vars/volume_backend @@ -0,0 +1,11 @@ +my_int_if: eth1 +my_ext_if: eth2 +my_mng_if: eth3 + +my_int_ip: "{{ ansible_eth1.ipv4.address }}" +my_ext_ip: "{{ ansible_eth2.ipv4.address }}" +my_mng_ip: "{{ ansible_eth3.ipv4.address }}" + +my_int_obj: "{{ ansible_eth1 }}" +my_ext_obj: "{{ ansible_eth2 }}" +my_mng_obj: "{{ ansible_eth3 }}" diff --git a/tools/ansible-openstack/openstack-ansible-modules/README.md b/tools/ansible-openstack/openstack-ansible-modules/README.md new file mode 100644 index 0000000..027926e --- /dev/null +++ b/tools/ansible-openstack/openstack-ansible-modules/README.md @@ -0,0 +1,84 @@ +# Ansible modules for managing OpenStack + +These are additional, unofficial Ansible modules for managing OpenStack. + +These are a dependency of the [openstack-ansible][1] repo for doing a test deployment of OpenStack into virtual machines managed by vagrant. + +To use this, add this directory to to the ANSIBLE_LIBRARY environment variable, or symlink this diretory to ./library relative to the playbook that uses it. + +[1]: http://github.com/lorin/openstack-ansible + +## keystone_manage + +Initialize the keystone database: + + keystone_manage: action=db_sync + +This is the equivalent of: + + # keystone-manage db_sync + + +## keystone_user + +Manage users, tenants, and roles + +Create a tenant + + keystone_user: token=$admin_token tenant=demo tenant_description="Default Tenant" + +Create a user + + keystone_user: token=$admin_token user=admin tenant=demo password=secrete + +Create and apply a role: + + keystone_user: token=$admin_token role=admin user=admin tenant=demo + +## keystone_service + +Manage services and endpoints + + keystone_service: token=$admin_token name=keystone type=identity desecription="Identity Service" public_url="http://192.168.206.130:5000/v2.0" internal_url="http://192.168.206.130:5000/v2.0" admin_url="http://192.168.206.130:35357/v2.0" + +You can use `url` as an alias for `public_url`. If you don't specify internal and admin urls, they will default to the same value of public url. 
For example: + + keystone_service: token=$admin_token name=nova type=compute description="Compute Service" url=http://192.168.206.130:8774/v2/%(tenant_id)s + + +## glance_manage + +Initialize the glance database: + + glance_manage: action=db_sync + +This is the (idempotent) equivalent of: + + # glance-manage version_control 0 + # glance-manage db_sync + + +## glance + +Add images + + glance: name=cirros file=/tmp/cirros-0.3.0-x86_64-disk.img disk_format=qcow2 is_public=true user=admin tenant=demo password=secrete region=RegionOne auth_url=http://192.168.206.130:5000/v2.0 + +## Not yet supported +- Disabled tenants +- Deleting users +- Deleting roles +- Deleting services +- Deleting endpoints +- Deleting images +- Updating tenants +- Updating users +- Updating services +- Updating endpoints +- Multiple endpoints per service +- Updating images + + +## Will probably never be supported +- Non-unique names for tenants, users, roles, services and images. + diff --git a/tools/ansible-openstack/openstack-ansible-modules/cinder_manage b/tools/ansible-openstack/openstack-ansible-modules/cinder_manage new file mode 100644 index 0000000..17c42b5 --- /dev/null +++ b/tools/ansible-openstack/openstack-ansible-modules/cinder_manage @@ -0,0 +1,102 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +DOCUMENTATION = ''' +--- +module: cinder_manage +short_description: Initialize OpenStack Block Storage (cinder) database +description: Create the tables for the database backend used by cinder +options: + action: + description: + - action to perform. Currently only dbysnc is supported + required: true + conf: + description: + - path to cinder config file. + required: false + default: /etc/cinder/cinder.conf +requirements: [ cinder ] +author: Lorin Hochstein +''' + +EXAMPLES = ''' +cinder_manage: action=dbsync +''' + +import subprocess + +try: + from cinder.db.sqlalchemy import migration + from cinder import flags +except ImportError: + cinder_found = False +else: + cinder_found = True + + +def load_config_file(conf): + flags.FLAGS(args=[], project='cinder', + default_config_files=[conf]) + + +def will_db_change(): + """ Check if the database version will change after the sync. + + """ + # Load the config file options + current_version = migration.db_version() + repository = migration._find_migrate_repo() + repo_version = repository.latest + return current_version != repo_version + + +def do_dbsync(): + """Do the dbsync. Returns (returncode, stdout, stderr)""" + # We call cinder-manage db_sync on the shell rather than trying to + # do this in Python since we have no guarantees about changes to the + # internals. 
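+    # i.e. run the equivalent of `cinder-manage db sync` as a subprocess.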
+ args = ['cinder-manage', 'db', 'sync'] + + call = subprocess.Popen(args, shell=False, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = call.communicate() + return (call.returncode, out, err) + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + action=dict(required=True), + conf=dict(required=False, default="/etc/cinder/cinder.conf") + ), + supports_check_mode=True + ) + + if not cinder_found: + module.fail_json(msg="cinder package could not be found") + + action = module.params['action'] + conf = module.params['conf'] + + if action not in ['dbsync', 'db_sync']: + module.fail_json(msg="Only supported action is 'dbsync'") + + load_config_file(conf) + + changed = will_db_change() + if module.check_mode: + module.exit_json(changed=changed) + + (res, stdout, stderr) = do_dbsync() + + if res == 0: + module.exit_json(changed=changed, stdout=stdout, stderr=stderr) + else: + module.fail_json(msg="cinder-manage returned non-zero value: %d" % res, + stdout=stdout, stderr=stderr) + +# this is magic, see lib/ansible/module_common.py +#<> +main() diff --git a/tools/ansible-openstack/openstack-ansible-modules/glance b/tools/ansible-openstack/openstack-ansible-modules/glance new file mode 100644 index 0000000..b4c91a8 --- /dev/null +++ b/tools/ansible-openstack/openstack-ansible-modules/glance @@ -0,0 +1,164 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +DOCUMENTATION = ''' +--- +module: glance +short_description: Manage OpenStack virtual machine images +description: + - Upload virtual machine images to OpenStack Image Service (glance) +requirements: [ python-glanceclient ] +options: + name: + description: + - name of the image + required: true + format: + description: + - disk format + choices: [ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, iso] + required: true + is_public: + description: + - if true, image is public + choices: [true, false] + aliases: [public] + required: false + default: false + file: + description: + - path to the file that contains the image + required: true + aliases: [path] + auth_url: + description: + - URL to Identity service (keystone) catalog endpoint + required: true + region: + description: + - OpenStack region name + required: false + aliases: [region_name] + username: + description: + - user name to authenticate against Identity service + aliases: [user, user_name, login_user] + password: + description: + - password to authenticate against Identiy service + aliases: [pass, login_password] + tenant_name: + description: + - name of the tenant + +examples: + - code: 'glance: name=cirros file=/tmp/cirros.img format=qcow2 is_public=true auth_url=http://192.168.206.130:5000/v2.0/ username=admin tenant_name=demo password=secrete region=RegionOne ' +''' + + +try: + from glanceclient import Client + from keystoneclient.v2_0 import client as ksclient +except ImportError: + glanceclient_found = False +else: + glanceclient_found = True + + +def get_token_and_endpoint(auth_url, username, password, tenant_name, + region_name): + + keystone = ksclient.Client(username=username, + password=password, + tenant_name=tenant_name, + auth_url=auth_url, + region_name=region_name) + glance_endpoint = keystone.service_catalog.url_for( + service_type="image", + endpoint_type="publicURL") + return (keystone.auth_token, glance_endpoint) + + +def authenticate(auth_url, username, password, tenant_name, region, + version='1'): + """Return a keystone client object""" + + (token, endpoint) = get_token_and_endpoint(auth_url, username, password, + tenant_name, region) + + 
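+    # Build the glance client directly against the image endpoint, reusing
+    # the token obtained above so glanceclient does not re-authenticate.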
return Client(version, endpoint=endpoint, token=token) + + +def get_images(glance, name): + """ Retrieve all images with a certain name """ + images = [x for x in glance.images.list() if x.name == name] + return images + + +def create_image(glance, name, path, disk_format, is_public, check_mode): + """ Create a new image from a file on the path. + + Return a pair. First element indicates whether a change occurred, + second one is the ID of the iamge """ + + # If the image(s) already exists, we're done + images = get_images(glance, name) + if len(images) > 0: + return (False, images[0].id) + + if check_mode: + return (True, None) + + image = glance.images.create(name=name, disk_format=disk_format, + container_format='bare', + is_public=is_public) + image.update(data=open(path, 'rb')) + return (True, image.id) + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + file=dict(required=True, aliases=['path']), + auth_url=dict(required=True), + region=dict(required=False, aliases=['region_name']), + username=dict(required=True, aliases=['user', + 'user_name', + 'login_user']), + password=dict(required=True, aliases=['pass', 'login_password']), + tenant_name=dict(required=True, aliases=['tenant']), + disk_format=dict(required=True, + choices=['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', + 'qcow2', 'vdi', 'iso'], + aliases=['disk-format', 'format']), + is_public=dict(required=False, + default=False, + aliases=['public']) + ), + supports_check_mode=True + ) + + name = module.params['name'] + path = module.params['file'] + auth_url = module.params['auth_url'] + region = module.params['region'] + username = module.params['username'] + password = module.params['password'] + tenant_name = module.params['tenant_name'] + disk_format = module.params['disk_format'] + is_public = module.params['is_public'] + check_mode = module.check_mode + + glance = authenticate(auth_url, username, password, tenant_name, region) + + (changed, id) = create_image(glance, name, path, disk_format, is_public, + check_mode) + + module.exit_json(changed=changed, name=name, id=id) + +# this is magic, see lib/ansible/module_common.py +#<> +if __name__ == '__main__': + main() diff --git a/tools/ansible-openstack/openstack-ansible-modules/glance_manage b/tools/ansible-openstack/openstack-ansible-modules/glance_manage new file mode 100644 index 0000000..e02f75e --- /dev/null +++ b/tools/ansible-openstack/openstack-ansible-modules/glance_manage @@ -0,0 +1,124 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +DOCUMENTATION = ''' +--- +module: glance_manage +short_description: Initialize OpenStack Image (glance) database +description: Create the tables for the database backend used by glance +options: + action: + description: + - action to perform. Currently only dbsync is supported. + required: true + conf: + description: + - path to glance-registry config file. 
+ required: false + default: /etc/glance/glance-registry.conf +requirements: [ glance ] +author: Lorin Hochstein +''' + +EXAMPLES = ''' +glance_manage: action=dbsync +''' + +# this is necessary starting from havana release due to bug 885529 +# https://bugs.launchpad.net/glance/+bug/885529 +from glance.openstack.common import gettextutils +gettextutils.install('glance') +import glance.db.sqlalchemy.api + +try: + from glance.db.sqlalchemy import migration + from glance.common.exception import DatabaseMigrationError + from migrate.versioning import api as versioning_api +except ImportError: + glance_found = False +else: + glance_found = True + +import subprocess + +def is_under_version_control(conf): + """ Return true if the database is under version control""" + migration.CONF(project='glance', default_config_files=[conf]) + try: + migration.db_version() + except DatabaseMigrationError: + return False + else: + return True + + +def will_db_change(conf): + """ Check if the database version will change after the sync """ + # Load the config file options + if not is_under_version_control(conf): + return True + migration.CONF(project='glance', default_config_files=[conf]) + current_version = migration.db_version() + repo_path = migration.get_migrate_repo_path() + repo_version = versioning_api.repository.Repository(repo_path).latest + return current_version != repo_version + + +def put_under_version_control(): + """ Create the initial sqlalchemy migrate database tables. """ + args = ['glance-manage', 'version_control', '0'] + + call = subprocess.Popen(args, shell=False, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = call.communicate() + return (call.returncode, out, err) + + +def do_dbsync(): + """ Do a database migration """ + args = ['glance-manage', 'db_sync'] + + call = subprocess.Popen(args, shell=False, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = call.communicate() + return (call.returncode, out, err) + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + action=dict(required=True), + conf=dict(required=False, + default="/etc/glance/glance-registry.conf") + ), + supports_check_mode=True + ) + if not glance_found: + module.fail_json(msg="glance package could not be found") + + action = module.params['action'] + if action not in ['dbsync', 'db_sync']: + module.fail_json(msg="Only supported action is 'dbsync'") + + conf = module.params['conf'] + + changed = will_db_change(conf) + if module.check_mode: + module.exit_json(changed=changed) + + if not is_under_version_control(conf): + (res, stdout, stderr) = put_under_version_control() + if res != 0: + msg = "failed to put glance db under version control" + module.fail_json(msg=msg, stdout=stdout, stderr=stderr) + + (res, stdout, stderr) = do_dbsync() + if res != 0: + msg = "failed to synchronize glance db with repository" + module.fail_json(msg=msg, stdout=stdout, stderr=stderr) + + module.exit_json(changed=changed) + +#<> +main() diff --git a/tools/ansible-openstack/openstack-ansible-modules/keystone_manage b/tools/ansible-openstack/openstack-ansible-modules/keystone_manage new file mode 100644 index 0000000..37161a0 --- /dev/null +++ b/tools/ansible-openstack/openstack-ansible-modules/keystone_manage @@ -0,0 +1,111 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +DOCUMENTATION = ''' +--- +module: keystone_manage +short_description: Initialize OpenStack Identity (keystone) database +description: Create the tables for the database backend used by keystone +options: + action: + description: + - 
action to perform. Currently only dbysnc is supported + required: true + conf: + description: + - path to keystone config file. + required: false + default: /etc/keystone/keystone.conf +requirements: [ keystone ] +author: Lorin Hochstein +''' + +EXAMPLES = ''' +keystone_manage: action=dbsync +''' + +import subprocess + +try: + # this is necessary starting from havana release due to bug 885529 + # https://bugs.launchpad.net/glance/+bug/885529 + from keystone.openstack.common import gettextutils + gettextutils.install('keystone') +except AttributeError: + # this is not havana + pass + +try: + from keystone.common.sql import migration + from migrate.versioning import api as versioning_api +except ImportError: + keystone_found = False +else: + keystone_found = True + + +def will_db_change(conf): + """ Check if the database version will change after the sync. + + conf is the path to the keystone config file + + """ + # Load the config file options + migration.CONF(project='keystone', default_config_files=[conf]) + current_version = migration.db_version() + + # in havana the method _find_migrate_repo has been renamed to find_migrate_repo + try: + repo_path = migration.find_migrate_repo() + except AttributeError: + repo_path = migration._find_migrate_repo() + repo_version = versioning_api.repository.Repository(repo_path).latest + return current_version != repo_version + + +def do_dbsync(): + """Do the dbsync. Returns (returncode, stdout, stderr)""" + # We call keystone-manage db_sync on the shell rather than trying to + # do this in Python since we have no guarantees about changes to the + # internals. + args = ['keystone-manage', 'db_sync'] + + call = subprocess.Popen(args, shell=False, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = call.communicate() + return (call.returncode, out, err) + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + action=dict(required=True), + conf=dict(required=False, default="/etc/keystone/keystone.conf") + ), + supports_check_mode=True + ) + + if not keystone_found: + module.fail_json(msg="keystone package could not be found") + + action = module.params['action'] + conf = module.params['conf'] + if action not in ['dbsync', 'db_sync']: + module.fail_json(msg="Only supported action is 'dbsync'") + + changed = will_db_change(conf) + if module.check_mode: + module.exit_json(changed=changed) + + (res, stdout, stderr) = do_dbsync() + + if res == 0: + module.exit_json(changed=changed, stdout=stdout, stderr=stderr) + else: + module.fail_json(msg="keystone-manage returned non-zero value: %d" % res, + stdout=stdout, stderr=stderr) + +# this is magic, see lib/ansible/module_common.py +#<> +main() diff --git a/tools/ansible-openstack/openstack-ansible-modules/keystone_service b/tools/ansible-openstack/openstack-ansible-modules/keystone_service new file mode 100644 index 0000000..9c038a4 --- /dev/null +++ b/tools/ansible-openstack/openstack-ansible-modules/keystone_service @@ -0,0 +1,302 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +DOCUMENTATION = ''' +--- +module: keystone_service +short_description: Manage OpenStack Identity (keystone) service endpoints +options: + name: + description: + - name of service (e.g., keystone) + required: yes + type: + description: + - type of service (e.g., identity) + required: yes + description: + description: + - description of service (e.g., Identity Service) + required: yes + public_url: + description: + - public url of service. 
+ - 'Alias: I(url)' + - 'Alias: I(publicurl)' + required: yes + internal_url: + description: + - internal url of service. + - 'Alias: I(internalurl)' + required: no + default: value of public_url + admin_url: + description: + - admin url of service. + - 'Alias: I(adminurl)' + required: no + default: value of public_url + insecure: + description: + - allow use of self-signed SSL certificates + required: no + choices: [ "yes", "no" ] + region: + description: + - region of service + required: yes + state: + description: + - Indicate desired state of the resource + choices: ['present', 'absent'] + default: present + + + +requirements: [ python-keystoneclient ] +author: Lorin Hochstein +''' + +EXAMPLES = ''' +examples: +keystone_service: > + name=keystone + type=identity + description="Keystone Identity Service" + publicurl=http://192.168.206.130:5000/v2.0 + internalurl=http://192.168.206.130:5000/v2.0 + adminurl=http://192.168.206.130:35357/v2.0 + +keystone_service: > + name=glance + type=image + description="Glance Identity Service" + url=http://192.168.206.130:9292 + +''' + +try: + from keystoneclient.v2_0 import client +except ImportError: + keystoneclient_found = False +else: + keystoneclient_found = True + +import traceback + + +def authenticate(endpoint, token, login_user, login_password, insecure): + """Return a keystone client object""" + + if token: + return client.Client(endpoint=endpoint, token=token, insecure=insecure) + else: + return client.Client(endpoint=endpoint, username=login_user, + password=login_password, insecure=insecure) + +def get_service(keystone, name): + """ Retrieve a service by name """ + services = [x for x in keystone.services.list() if x.name == name] + count = len(services) + if count == 0: + raise KeyError("No keystone services with name %s" % name) + elif count > 1: + raise ValueError("%d services with name %s" % (count, name)) + else: + return services[0] + + +def get_endpoint(keystone, name): + """ Retrieve a service endpoint by name """ + service = get_service(keystone, name) + endpoints = [x for x in keystone.endpoints.list() + if x.service_id == service.id] + count = len(endpoints) + if count == 0: + raise KeyError("No keystone endpoints with service name %s" % name) + elif count > 1: + raise ValueError("%d endpoints with service name %s" % (count, name)) + else: + return endpoints[0] + + +def ensure_service_present(keystone, name, service_type, description, + check_mode): + """ Ensure the service is present and has the right values + + Returns a pair, where the first element is a boolean that indicates + a state change, and the second element is the service uuid, or None + if running in check mode""" + service = None + try: + service = get_service(keystone, name) + except: + # Service doesn't exist yet, we'll need to create one + pass + else: + # See if it matches exactly + if service.name == name and \ + service.type == service_type and \ + service.description == description: + + # Same, no changes needed + return (False, service.id) + + # At this point, we know we will need to make a change + if check_mode: + return (True, None) + + if service is None: + service = keystone.services.create(name=name, + service_type=service_type, + description=description) + return (True, service.id) + else: + msg = "keystone v2 API doesn't support updating services" + raise ValueError(msg) + + +def ensure_endpoint_present(keystone, name, public_url, internal_url, + admin_url, region, check_mode): + """ Ensure the service endpoint is present and have the right values 
+ + Assumes the service object has already been created at this point""" + + service = get_service(keystone, name) + endpoint = None + try: + endpoint = get_endpoint(keystone, name) + except: + # Endpoint doesn't exist yet, we'll need to create one + pass + else: + # See if it matches + if endpoint.publicurl == public_url and \ + endpoint.adminurl == admin_url and \ + endpoint.internalurl == internal_url and \ + endpoint.region == region: + + # Same, no changes needed + return (False, endpoint.id) + + # At this point, we know we will need to make a change + if check_mode: + return (True, None) + + if endpoint is None: + endpoint = keystone.endpoints.create(region=region, + service_id=service.id, + publicurl=public_url, + adminurl=admin_url, + internalurl=internal_url) + return (True, endpoint.id) + else: + msg = "keystone v2 API doesn't support updating endpoints" + raise ValueError(msg) + + +def ensure_service_absent(keystone, name, check_mode): + """ Ensure the service is absent""" + + raise NotImplementedError() + +def ensure_endpoint_absent(keystone, name, check_mode): + """ Ensure the service endpoint """ + raise NotImplementedError() + + +def dispatch(keystone, name, service_type, description, public_url, + internal_url, admin_url, region, state, check_mode): + + if state == 'present': + (service_changed, service_id) = ensure_service_present(keystone, + name, + service_type, + description, + check_mode) + + (endpoint_changed, endpoint_id) = ensure_endpoint_present( + keystone, + name, + public_url, + internal_url, + admin_url, + region, + check_mode) + return dict(changed=service_changed or endpoint_changed, + service_id=service_id, + endpoint_id=endpoint_id) + elif state == 'absent': + endpoint_changed = ensure_endpoint_absent(keystone, name, check_mode) + service_changed = ensure_service_absent(keystone, name, check_mode) + return dict(changed=service_changed or endpoint_changed) + else: + raise ValueError("Code should never reach here") + + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + type=dict(required=True), + description=dict(required=False), + public_url=dict(required=True, aliases=['url', 'publicurl']), + internal_url=dict(required=False, aliases=['internalurl']), + admin_url=dict(required=False, aliases=['adminurl']), + region=dict(required=True), + state=dict(default='present', choices=['present', 'absent']), + endpoint=dict(required=False, + default="http://127.0.0.1:35357/v2.0"), + token=dict(required=False), + insecure=dict(required=False, default=False, choices=BOOLEANS), + + login_user=dict(required=False), + login_password=dict(required=False) + ), + supports_check_mode=True, + mutually_exclusive=[['token', 'login_user'], + ['token', 'login_password']] + ) + + endpoint = module.params['endpoint'] + token = module.params['token'] + login_user = module.params['login_user'] + login_password = module.params['login_password'] + insecure = module.boolean(module.params['insecure']) + name = module.params['name'] + service_type = module.params['type'] + description = module.params['description'] + public_url = module.params['public_url'] + internal_url = module.params['internal_url'] + if internal_url is None: + internal_url = public_url + admin_url = module.params['admin_url'] + if admin_url is None: + admin_url = public_url + region = module.params['region'] + state = module.params['state'] + + keystone = authenticate(endpoint, token, login_user, login_password, insecure) + check_mode = module.check_mode + + try: + d = 
dispatch(keystone, name, service_type, description, + public_url, internal_url, admin_url, region, state, + check_mode) + except Exception: + if check_mode: + # If we have a failure in check mode + module.exit_json(changed=True, + msg="exception: %s" % traceback.format_exc()) + else: + module.fail_json(msg=traceback.format_exc()) + else: + module.exit_json(**d) + + +# this is magic, see lib/ansible/module_common.py +#<> +if __name__ == '__main__': + main() diff --git a/tools/ansible-openstack/openstack-ansible-modules/neutron_floating_ip b/tools/ansible-openstack/openstack-ansible-modules/neutron_floating_ip new file mode 100644 index 0000000..1ddd4d2 --- /dev/null +++ b/tools/ansible-openstack/openstack-ansible-modules/neutron_floating_ip @@ -0,0 +1,242 @@ +#!/usr/bin/python +#coding: utf-8 -*- + +# (c) 2013, Benno Joy +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +try: + from novaclient.v1_1 import client as nova_client + from neutronclient.neutron import client + from keystoneclient.v2_0 import client as ksclient + import time +except ImportError: + print("failed=True msg='glanceclient,keystoneclient and neutronclient client are required'") + +DOCUMENTATION = ''' +--- +module: neutron_floating_ip +version_added: "1.2" +short_description: Add/Remove floating IP from an instance +description: + - Add or Remove a floating IP to an instance +options: + login_username: + description: + - login username to authenticate to keystone + required: true + default: admin + login_password: + description: + - Password of login user + required: true + default: 'yes' + login_tenant_name: + description: + - The tenant name of the login user + required: true + default: 'yes' + auth_url: + description: + - The keystone url for authentication + required: false + default: 'http://127.0.0.1:35357/v2.0/' + region_name: + description: + - Name of the region + required: false + default: None + state: + description: + - Indicate desired state of the resource + choices: ['present', 'absent'] + default: present + network_name: + description: + - Name of the network from which IP has to be assigned to VM. 
Please make sure the network is an external network + required: true + default: None + instance_name: + description: + - The name of the instance to which the IP address should be assigned + required: true + default: None +requirements: ["novaclient", "neutronclient", "keystoneclient"] +''' + +EXAMPLES = ''' +# Assign a floating ip to the instance from an external network +- neutron_floating_ip: state=present login_username=admin login_password=admin + login_tenant_name=admin network_name=external_network + instance_name=vm1 +''' + +def _get_ksclient(module, kwargs): + try: + kclient = ksclient.Client(username=kwargs.get('login_username'), + password=kwargs.get('login_password'), + tenant_name=kwargs.get('login_tenant_name'), + auth_url=kwargs.get('auth_url')) + except Exception as e: + module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message) + global _os_keystone + _os_keystone = kclient + return kclient + + +def _get_endpoint(module, ksclient): + try: + endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') + except Exception as e: + module.fail_json(msg = "Error getting endpoint for glance: %s" % e.message) + return endpoint + +def _get_neutron_client(module, kwargs): + _ksclient = _get_ksclient(module, kwargs) + token = _ksclient.auth_token + endpoint = _get_endpoint(module, _ksclient) + kwargs = { + 'token': token, + 'endpoint_url': endpoint + } + try: + neutron = client.Client('2.0', **kwargs) + except Exception as e: + module.fail_json(msg = "Error in connecting to neutron: %s " % e.message) + return neutron + +def _get_server_state(module, nova): + server_info = None + server = None + try: + for server in nova.servers.list(): + if server: + info = server._info + if info['name'] == module.params['instance_name']: + if info['status'] != 'ACTIVE' and module.params['state'] == 'present': + module.fail_json( msg="The VM is available but not Active. 
state:" + info['status']) + server_info = info + break + except Exception as e: + module.fail_json(msg = "Error in getting the server list: %s" % e.message) + return server_info, server + +def _get_port_info(neutron, module, instance_id): + kwargs = { + 'device_id': instance_id, + } + try: + ports = neutron.list_ports(**kwargs) + except Exception as e: + module.fail_json( msg = "Error in listing ports: %s" % e.message) + if not ports['ports']: + return None, None + return ports['ports'][0]['fixed_ips'][0]['ip_address'], ports['ports'][0]['id'] + +def _get_floating_ip(module, neutron, fixed_ip_address): + kwargs = { + 'fixed_ip_address': fixed_ip_address + } + try: + ips = neutron.list_floatingips(**kwargs) + except Exception as e: + module.fail_json(msg = "error in fetching the floatingips's %s" % e.message) + if not ips['floatingips']: + return None, None + return ips['floatingips'][0]['id'], ips['floatingips'][0]['floating_ip_address'] + +def _create_floating_ip(neutron, module, port_id, net_id): + kwargs = { + 'port_id': port_id, + 'floating_network_id': net_id + } + try: + result = neutron.create_floatingip({'floatingip': kwargs}) + except Exception as e: + module.fail_json(msg="There was an error in updating the floating ip address: %s" % e.message) + module.exit_json(changed=True, result=result, public_ip=result['floatingip']['floating_ip_address']) + +def _get_net_id(neutron, module): + kwargs = { + 'name': module.params['network_name'], + } + try: + networks = neutron.list_networks(**kwargs) + except Exception as e: + module.fail_json("Error in listing neutron networks: %s" % e.message) + if not networks['networks']: + return None + return networks['networks'][0]['id'] + +def _update_floating_ip(neutron, module, port_id, floating_ip_id): + kwargs = { + 'port_id': port_id + } + try: + result = neutron.update_floatingip(floating_ip_id, {'floatingip': kwargs}) + except Exception as e: + module.fail_json(msg="There was an error in updating the floating ip address: %s" % e.message) + module.exit_json(changed=True, result=result) + + +def main(): + + module = AnsibleModule( + argument_spec = dict( + login_username = dict(default='admin'), + login_password = dict(required=True), + login_tenant_name = dict(required='True'), + auth_url = dict(default='http://127.0.0.1:35357/v2.0/'), + region_name = dict(default=None), + network_name = dict(required=True), + instance_name = dict(required=True), + state = dict(default='present', choices=['absent', 'present']) + ), + ) + + try: + nova = nova_client.Client(module.params['login_username'], module.params['login_password'], + module.params['login_tenant_name'], module.params['auth_url'], service_type='compute') + neutron = _get_neutron_client(module, module.params) + except Exception as e: + module.fail_json(msg="Error in authenticating to nova: %s" % e.message) + + server_info, server_obj = _get_server_state(module, nova) + if not server_info: + module.fail_json(msg="The instance name provided cannot be found") + + fixed_ip, port_id = _get_port_info(neutron, module, server_info['id']) + if not port_id: + module.fail_json(msg="Cannot find a port for this instance, maybe fixed ip is not assigned") + + floating_id, floating_ip = _get_floating_ip(module, neutron, fixed_ip) + + if module.params['state'] == 'present': + if floating_ip: + module.exit_json(changed = False, public_ip=floating_ip) + net_id = _get_net_id(neutron, module) + if not net_id: + module.fail_json(msg = "cannot find the network specified, please check") + 
_create_floating_ip(neutron, module, port_id, net_id) + + if module.params['state'] == 'absent': + if floating_ip: + _update_floating_ip(neutron, module, None, floating_id) + module.exit_json(changed=False) + +# this is magic, see lib/ansible/module_common.py +from ansible.module_utils.basic import * +main() + + diff --git a/tools/ansible-openstack/openstack-ansible-modules/neutron_network b/tools/ansible-openstack/openstack-ansible-modules/neutron_network new file mode 100644 index 0000000..6dee045 --- /dev/null +++ b/tools/ansible-openstack/openstack-ansible-modules/neutron_network @@ -0,0 +1,282 @@ +#!/usr/bin/python +#coding: utf-8 -*- + +# (c) 2013, Benno Joy +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +try: + from neutronclient.neutron import client + from keystoneclient.v2_0 import client as ksclient +except ImportError: + print("failed=True msg='neutronclient and keystone client are required'") + +DOCUMENTATION = ''' +--- +module: neutron_network +version_added: "1.4" +short_description: Creates/Removes networks from OpenStack +description: + - Add or Remove a network from OpenStack. +options: + login_username: + description: + - login username to authenticate to keystone + required: true + default: admin + login_password: + description: + - Password of login user + required: true + default: 'yes' + login_tenant_name: + description: + - The tenant name of the login user + required: true + default: 'yes' + tenant_name: + description: + - The name of the tenant for whom the network is created + required: false + default: None + auth_url: + description: + - The keystone url for authentication + required: false + default: 'http://127.0.0.1:35357/v2.0/' + region_name: + description: + - Name of the region + required: false + default: None + state: + description: + - Indicate desired state of the resource + choices: ['present', 'absent'] + default: present + name: + description: + - Name to be assigned to the network + required: true + default: None + provider_network_type: + description: + - The type of the network to be created, gre, vxlan, vlan, local. Available types depend on the plugin. The Neutron service decides if not specified. + required: false + default: None + provider_physical_network: + description: + - The physical network which would realize the virtual network for flat and vlan networks. + required: false + default: None + provider_segmentation_id: + description: + - The id that has to be assigned to the network, in case of vlan networks that would be vlan id, for gre the tunnel id and for vxlan the VNI. + required: false + default: None + router_external: + description: + - If 'yes', specifies that the virtual network is an external network (public).
+ required: false + default: false + shared: + description: + - Whether this network is shared or not + required: false + default: false + admin_state_up: + description: + - Whether the state should be marked as up or down + required: false + default: true +requirements: ["neutronclient", "keystoneclient"] + +''' + +EXAMPLES = ''' +# Create a GRE backed Neutron network with tunnel id 1 for tenant1 +- neutron_network: name=t1network tenant_name=tenant1 state=present + provider_network_type=gre provider_segmentation_id=1 + login_username=admin login_password=admin login_tenant_name=admin + +# Create an external network +- neutron_network: name=external_network state=present + provider_network_type=local router_external=yes + login_username=admin login_password=admin login_tenant_name=admin +''' + +_os_keystone = None +_os_tenant_id = None + +def _get_ksclient(module, kwargs): + try: + kclient = ksclient.Client(username=kwargs.get('login_username'), + password=kwargs.get('login_password'), + tenant_name=kwargs.get('login_tenant_name'), + auth_url=kwargs.get('auth_url')) + except Exception as e: + module.fail_json(msg = "Error authenticating to the keystone: %s" %e.message) + global _os_keystone + _os_keystone = kclient + return kclient + + +def _get_endpoint(module, ksclient): + try: + endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') + except Exception as e: + module.fail_json(msg = "Error getting endpoint for Neutron: %s " %e.message) + return endpoint + +def _get_neutron_client(module, kwargs): + _ksclient = _get_ksclient(module, kwargs) + token = _ksclient.auth_token + endpoint = _get_endpoint(module, _ksclient) + kwargs = { + 'token': token, + 'endpoint_url': endpoint + } + try: + neutron = client.Client('2.0', **kwargs) + except Exception as e: + module.fail_json(msg = " Error in connecting to Neutron: %s " %e.message) + return neutron + +def _set_tenant_id(module): + global _os_tenant_id + if not module.params['tenant_name']: + tenant_name = module.params['login_tenant_name'] + else: + tenant_name = module.params['tenant_name'] + + for tenant in _os_keystone.tenants.list(): + if tenant.name == tenant_name: + _os_tenant_id = tenant.id + break + if not _os_tenant_id: + module.fail_json(msg = "The tenant id cannot be found, please check the paramters") + + +def _get_net_id(neutron, module): + kwargs = { + 'tenant_id': _os_tenant_id, + 'name': module.params['name'], + } + try: + networks = neutron.list_networks(**kwargs) + except Exception as e: + module.fail_json(msg = "Error in listing Neutron networks: %s" % e.message) + if not networks['networks']: + return None + return networks['networks'][0]['id'] + +def _create_network(module, neutron): + + neutron.format = 'json' + + network = { + 'name': module.params.get('name'), + 'tenant_id': _os_tenant_id, + 'provider:network_type': module.params.get('provider_network_type'), + 'provider:physical_network': module.params.get('provider_physical_network'), + 'provider:segmentation_id': module.params.get('provider_segmentation_id'), + 'router:external': module.params.get('router_external'), + 'shared': module.params.get('shared'), + 'admin_state_up': module.params.get('admin_state_up'), + } + + if module.params['provider_network_type'] == 'local': + network.pop('provider:physical_network', None) + network.pop('provider:segmentation_id', None) + + if module.params['provider_network_type'] == 'flat': + network.pop('provider:segmentation_id', None) + + if module.params['provider_network_type'] == 'gre': + 
network.pop('provider:physical_network', None) + + if module.params['provider_network_type'] == 'vxlan': + network.pop('provider:physical_network', None) + + if module.params['provider_network_type'] is None: + network.pop('provider:network_type', None) + network.pop('provider:physical_network', None) + network.pop('provider:segmentation_id', None) + + try: + net = neutron.create_network({'network':network}) + except Exception as e: + module.fail_json(msg = "Error in creating network: %s" % e.message) + return net['network']['id'] + +def _delete_network(module, net_id, neutron): + + try: + id = neutron.delete_network(net_id) + except Exception as e: + module.fail_json(msg = "Error in deleting the network: %s" % e.message) + return True + +def main(): + + module = AnsibleModule( + argument_spec = dict( + login_username = dict(default='admin'), + login_password = dict(required=True), + login_tenant_name = dict(required='True'), + auth_url = dict(default='http://127.0.0.1:35357/v2.0/'), + region_name = dict(default=None), + name = dict(required=True), + tenant_name = dict(default=None), + provider_network_type = dict(default=None, choices=['local', 'vlan', 'flat', 'gre', 'vxlan']), + provider_physical_network = dict(default=None), + provider_segmentation_id = dict(default=None), + router_external = dict(default=False, type='bool'), + shared = dict(default=False, type='bool'), + admin_state_up = dict(default=True, type='bool'), + state = dict(default='present', choices=['absent', 'present']) + ), + ) + + if module.params['provider_network_type'] in ['vlan' , 'flat']: + if not module.params['provider_physical_network']: + module.fail_json(msg = " for vlan and flat networks, variable provider_physical_network should be set.") + + if module.params['provider_network_type'] in ['vlan', 'gre', 'vxlan']: + if not module.params['provider_segmentation_id']: + module.fail_json(msg = " for vlan, gre & vxlan networks, variable provider_segmentation_id should be set.") + + neutron = _get_neutron_client(module, module.params) + + _set_tenant_id(module) + + if module.params['state'] == 'present': + network_id = _get_net_id(neutron, module) + if not network_id: + network_id = _create_network(module, neutron) + module.exit_json(changed = True, result = "Created", id = network_id) + else: + module.exit_json(changed = False, result = "Success", id = network_id) + + if module.params['state'] == 'absent': + network_id = _get_net_id(neutron, module) + if not network_id: + module.exit_json(changed = False, result = "Success") + else: + _delete_network(module, network_id, neutron) + module.exit_json(changed = True, result = "Deleted") + +# this is magic, see lib/ansible/module.params['common.py +from ansible.module_utils.basic import * +main() diff --git a/tools/ansible-openstack/openstack-ansible-modules/neutron_router b/tools/ansible-openstack/openstack-ansible-modules/neutron_router new file mode 100644 index 0000000..56d384d --- /dev/null +++ b/tools/ansible-openstack/openstack-ansible-modules/neutron_router @@ -0,0 +1,210 @@ +#!/usr/bin/python +#coding: utf-8 -*- + +# (c) 2013, Benno Joy +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +try: + from neutronclient.neutron import client + from keystoneclient.v2_0 import client as ksclient +except ImportError: + print("failed=True msg='neutronclient and keystone client are required'") + +DOCUMENTATION = ''' +--- +module: neutron_router +version_added: "1.2" +short_description: Create or Remove router from openstack +description: + - Create or Delete routers from OpenStack +options: + login_username: + description: + - login username to authenticate to keystone + required: true + default: admin + login_password: + description: + - Password of login user + required: true + default: 'yes' + login_tenant_name: + description: + - The tenant name of the login user + required: true + default: 'yes' + auth_url: + description: + - The keystone url for authentication + required: false + default: 'http://127.0.0.1:35357/v2.0/' + region_name: + description: + - Name of the region + required: false + default: None + state: + description: + - Indicate desired state of the resource + choices: ['present', 'absent'] + default: present + name: + description: + - Name to be give to the router + required: true + default: None + tenant_name: + description: + - Name of the tenant for which the router has to be created, if none router would be created for the login tenant. + required: false + default: None + admin_state_up: + description: + - desired admin state of the created router . + required: false + default: true +requirements: ["neutronclient", "keystoneclient"] +''' + +EXAMPLES = ''' +# Creates a router for tenant admin +- neutron_router: state=present + login_username=admin + login_password=admin + login_tenant_name=admin + name=router1" +''' + +_os_keystone = None +_os_tenant_id = None + +def _get_ksclient(module, kwargs): + try: + kclient = ksclient.Client(username=kwargs.get('login_username'), + password=kwargs.get('login_password'), + tenant_name=kwargs.get('login_tenant_name'), + auth_url=kwargs.get('auth_url')) + except Exception as e: + module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message) + global _os_keystone + _os_keystone = kclient + return kclient + + +def _get_endpoint(module, ksclient): + try: + endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') + except Exception as e: + module.fail_json(msg = "Error getting endpoint for glance: %s" % e.message) + return endpoint + +def _get_neutron_client(module, kwargs): + _ksclient = _get_ksclient(module, kwargs) + token = _ksclient.auth_token + endpoint = _get_endpoint(module, _ksclient) + kwargs = { + 'token': token, + 'endpoint_url': endpoint + } + try: + neutron = client.Client('2.0', **kwargs) + except Exception as e: + module.fail_json(msg = "Error in connecting to Neutron: %s " % e.message) + return neutron + +def _set_tenant_id(module): + global _os_tenant_id + if not module.params['tenant_name']: + login_tenant_name = module.params['login_tenant_name'] + else: + login_tenant_name = module.params['tenant_name'] + + for tenant in _os_keystone.tenants.list(): + if tenant.name == login_tenant_name: + _os_tenant_id = tenant.id + break + if not _os_tenant_id: + module.fail_json(msg = "The tenant id cannot 
be found, please check the paramters") + + +def _get_router_id(module, neutron): + kwargs = { + 'name': module.params['name'], + 'tenant_id': _os_tenant_id, + } + try: + routers = neutron.list_routers(**kwargs) + except Exception as e: + module.fail_json(msg = "Error in getting the router list: %s " % e.message) + if not routers['routers']: + return None + return routers['routers'][0]['id'] + +def _create_router(module, neutron): + router = { + 'name': module.params['name'], + 'tenant_id': _os_tenant_id, + 'admin_state_up': module.params['admin_state_up'], + } + try: + new_router = neutron.create_router(dict(router=router)) + except Exception as e: + module.fail_json( msg = "Error in creating router: %s" % e.message) + return new_router['router']['id'] + +def _delete_router(module, neutron, router_id): + try: + neutron.delete_router(router_id) + except: + module.fail_json("Error in deleting the router") + return True + +def main(): + module = AnsibleModule( + argument_spec = dict( + login_username = dict(default='admin'), + login_password = dict(required=True), + login_tenant_name = dict(required='True'), + auth_url = dict(default='http://127.0.0.1:35357/v2.0/'), + region_name = dict(default=None), + name = dict(required=True), + tenant_name = dict(default=None), + state = dict(default='present', choices=['absent', 'present']), + admin_state_up = dict(type='bool', default=True), + ), + ) + + neutron = _get_neutron_client(module, module.params) + _set_tenant_id(module) + + if module.params['state'] == 'present': + router_id = _get_router_id(module, neutron) + if not router_id: + router_id = _create_router(module, neutron) + module.exit_json(changed=True, result="Created", id=router_id) + else: + module.exit_json(changed=False, result="success" , id=router_id) + + else: + router_id = _get_router_id(module, neutron) + if not router_id: + module.exit_json(changed=False, result="success") + else: + _delete_router(module, neutron, router_id) + module.exit_json(changed=True, result="deleted") + +# this is magic, see lib/ansible/module.params['common.py +from ansible.module_utils.basic import * +main() diff --git a/tools/ansible-openstack/openstack-ansible-modules/neutron_router_gateway b/tools/ansible-openstack/openstack-ansible-modules/neutron_router_gateway new file mode 100644 index 0000000..93235b8 --- /dev/null +++ b/tools/ansible-openstack/openstack-ansible-modules/neutron_router_gateway @@ -0,0 +1,215 @@ +#!/usr/bin/python +#coding: utf-8 -*- + +# (c) 2013, Benno Joy +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . 
+ +try: + from neutronclient.neutron import client + from keystoneclient.v2_0 import client as ksclient +except ImportError: + print("failed=True msg='neutronclient and keystone client are required'") +DOCUMENTATION = ''' +--- +module: neutron_router_gateway +version_added: "1.2" +short_description: set/unset a gateway interface for the router with the specified external network +description: + - Creates/Removes a gateway interface from the router, used to associate a external network with a router to route external traffic. +options: + login_username: + description: + - login username to authenticate to keystone + required: true + default: admin + login_password: + description: + - Password of login user + required: true + default: 'yes' + login_tenant_name: + description: + - The tenant name of the login user + required: true + default: 'yes' + auth_url: + description: + - The keystone URL for authentication + required: false + default: 'http://127.0.0.1:35357/v2.0/' + region_name: + description: + - Name of the region + required: false + default: None + state: + description: + - Indicate desired state of the resource + choices: ['present', 'absent'] + default: present + router_name: + description: + - Name of the router to which the gateway should be attached. + required: true + default: None + network_name: + description: + - Name of the external network which should be attached to the router. + required: true + default: None +requirements: ["neutronclient", "keystoneclient"] +''' + +EXAMPLES = ''' +# Attach an external network with a router to allow flow of external traffic +- neutron_router_gateway: state=present login_username=admin login_password=admin + login_tenant_name=admin router_name=external_router + network_name=external_network +''' + +_os_keystone = None +def _get_ksclient(module, kwargs): + try: + kclient = ksclient.Client(username=kwargs.get('login_username'), + password=kwargs.get('login_password'), + tenant_name=kwargs.get('login_tenant_name'), + auth_url=kwargs.get('auth_url')) + except Exception as e: + module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message) + global _os_keystone + _os_keystone = kclient + return kclient + + +def _get_endpoint(module, ksclient): + try: + endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') + except Exception as e: + module.fail_json(msg = "Error getting endpoint for glance: %s" % e.message) + return endpoint + +def _get_neutron_client(module, kwargs): + _ksclient = _get_ksclient(module, kwargs) + token = _ksclient.auth_token + endpoint = _get_endpoint(module, _ksclient) + kwargs = { + 'token': token, + 'endpoint_url': endpoint + } + try: + neutron = client.Client('2.0', **kwargs) + except Exception as e: + module.fail_json(msg = "Error in connecting to neutron: %s " % e.message) + return neutron + +def _get_router_id(module, neutron): + kwargs = { + 'name': module.params['router_name'], + } + try: + routers = neutron.list_routers(**kwargs) + except Exception as e: + module.fail_json(msg = "Error in getting the router list: %s " % e.message) + if not routers['routers']: + return None + return routers['routers'][0]['id'] + +def _get_net_id(neutron, module): + kwargs = { + 'name': module.params['network_name'], + 'router:external': True + } + try: + networks = neutron.list_networks(**kwargs) + except Exception as e: + module.fail_json("Error in listing neutron networks: %s" % e.message) + if not networks['networks']: + return None + return networks['networks'][0]['id'] + 
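+# NOTE: a router's external gateway shows up as a port on the external network
+# whose device_id is the router, so looking that port up (see _get_port_id below)
+# is how main() decides whether the gateway is already in the desired state.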
+def _get_port_id(neutron, module, router_id, network_id): + kwargs = { + 'device_id': router_id, + 'network_id': network_id, + } + try: + ports = neutron.list_ports(**kwargs) + except Exception as e: + module.fail_json( msg = "Error in listing ports: %s" % e.message) + if not ports['ports']: + return None + return ports['ports'][0]['id'] + +def _add_gateway_router(neutron, module, router_id, network_id): + kwargs = { + 'network_id': network_id + } + try: + neutron.add_gateway_router(router_id, kwargs) + except Exception as e: + module.fail_json(msg = "Error in adding gateway to router: %s" % e.message) + return True + +def _remove_gateway_router(neutron, module, router_id): + try: + neutron.remove_gateway_router(router_id) + except Exception as e: + module.fail_json(msg = "Error in removing gateway to router: %s" % e.message) + return True + +def main(): + + module = AnsibleModule( + argument_spec = dict( + login_username = dict(default='admin'), + login_password = dict(required=True), + login_tenant_name = dict(required='True'), + auth_url = dict(default='http://127.0.0.1:35357/v2.0/'), + region_name = dict(default=None), + router_name = dict(required=True), + network_name = dict(required=True), + state = dict(default='present', choices=['absent', 'present']), + ), + ) + + neutron = _get_neutron_client(module, module.params) + router_id = _get_router_id(module, neutron) + + if not router_id: + module.fail_json(msg="failed to get the router id, please check the router name") + + network_id = _get_net_id(neutron, module) + if not network_id: + module.fail_json(msg="failed to get the network id, please check the network name and make sure it is external") + + if module.params['state'] == 'present': + port_id = _get_port_id(neutron, module, router_id, network_id) + if not port_id: + _add_gateway_router(neutron, module, router_id, network_id) + module.exit_json(changed=True, result="created") + module.exit_json(changed=False, result="success") + + if module.params['state'] == 'absent': + port_id = _get_port_id(neutron, module, router_id, network_id) + if not port_id: + module.exit_json(changed=False, result="Success") + _remove_gateway_router(neutron, module, router_id) + module.exit_json(changed=True, result="Deleted") + +# this is magic, see lib/ansible/module.params['common.py +from ansible.module_utils.basic import * +main() + + diff --git a/tools/ansible-openstack/openstack-ansible-modules/neutron_router_interface b/tools/ansible-openstack/openstack-ansible-modules/neutron_router_interface new file mode 100644 index 0000000..8d57725 --- /dev/null +++ b/tools/ansible-openstack/openstack-ansible-modules/neutron_router_interface @@ -0,0 +1,249 @@ +#!/usr/bin/python +#coding: utf-8 -*- + +# (c) 2013, Benno Joy +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . 
+ +try: + from neutronclient.neutron import client + from keystoneclient.v2_0 import client as ksclient +except ImportError: + print("failed=True msg='neutronclient and keystone client are required'") +DOCUMENTATION = ''' +--- +module: neutron_router_interface +version_added: "1.2" +short_description: Attach/Detach a subnet's interface to a router +description: + - Attach/Detach a subnet interface to a router, to provide a gateway for the subnet. +options: + login_username: + description: + - login username to authenticate to keystone + required: true + default: admin + login_password: + description: + - Password of login user + required: true + default: 'yes' + login_tenant_name: + description: + - The tenant name of the login user + required: true + default: 'yes' + auth_url: + description: + - The keystone URL for authentication + required: false + default: 'http://127.0.0.1:35357/v2.0/' + region_name: + description: + - Name of the region + required: false + default: None + state: + description: + - Indicate desired state of the resource + choices: ['present', 'absent'] + default: present + router_name: + description: + - Name of the router to which the subnet's interface should be attached. + required: true + default: None + subnet_name: + description: + - Name of the subnet whose interface should be attached to the router. + required: true + default: None + tenant_name: + description: + - Name of the tenant whose subnet has to be attached. + required: false + default: None +requirements: ["neutronclient", "keystoneclient"] +''' + +EXAMPLES = ''' +# Attach tenant1's subnet to the external router +- neutron_router_interface: state=present login_username=admin + login_password=admin + login_tenant_name=admin + tenant_name=tenant1 + router_name=external_route + subnet_name=t1subnet +''' + + +_os_keystone = None +_os_tenant_id = None + +def _get_ksclient(module, kwargs): + try: + kclient = ksclient.Client(username=kwargs.get('login_username'), + password=kwargs.get('login_password'), + tenant_name=kwargs.get('login_tenant_name'), + auth_url=kwargs.get('auth_url')) + except Exception as e: + module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message) + global _os_keystone + _os_keystone = kclient + return kclient + + +def _get_endpoint(module, ksclient): + try: + endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') + except Exception as e: + module.fail_json(msg = "Error getting endpoint for neutron: %s" % e.message) + return endpoint + +def _get_neutron_client(module, kwargs): + _ksclient = _get_ksclient(module, kwargs) + token = _ksclient.auth_token + endpoint = _get_endpoint(module, _ksclient) + kwargs = { + 'token': token, + 'endpoint_url': endpoint + } + try: + neutron = client.Client('2.0', **kwargs) + except Exception as e: + module.fail_json(msg = "Error in connecting to neutron: %s " % e.message) + return neutron + +def _set_tenant_id(module): + global _os_tenant_id + if not module.params['tenant_name']: + login_tenant_name = module.params['login_tenant_name'] + else: + login_tenant_name = module.params['tenant_name'] + + for tenant in _os_keystone.tenants.list(): + if tenant.name == login_tenant_name: + _os_tenant_id = tenant.id + break + if not _os_tenant_id: + module.fail_json(msg = "The tenant id cannot be found, please check the parameters") + + +def _get_router_id(module, neutron): + kwargs = { + 'name': module.params['router_name'], + } + try: + routers = neutron.list_routers(**kwargs) + except Exception as e:
+ module.fail_json(msg = "Error in getting the router list: %s " % e.message) + if not routers['routers']: + return None + return routers['routers'][0]['id'] + + +def _get_subnet_id(module, neutron): + subnet_id = None + kwargs = { + 'tenant_id': _os_tenant_id, + 'name': module.params['subnet_name'], + } + try: + subnets = neutron.list_subnets(**kwargs) + except Exception as e: + module.fail_json( msg = " Error in getting the subnet list:%s " % e.message) + if not subnets['subnets']: + return None + return subnets['subnets'][0]['id'] + +def _get_port_id(neutron, module, router_id, subnet_id): + kwargs = { + 'tenant_id': _os_tenant_id, + 'device_id': router_id, + } + try: + ports = neutron.list_ports(**kwargs) + except Exception as e: + module.fail_json( msg = "Error in listing ports: %s" % e.message) + if not ports['ports']: + return None + for port in ports['ports']: + for subnet in port['fixed_ips']: + if subnet['subnet_id'] == subnet_id: + return port['id'] + return None + +def _add_interface_router(neutron, module, router_id, subnet_id): + kwargs = { + 'subnet_id': subnet_id + } + try: + neutron.add_interface_router(router_id, kwargs) + except Exception as e: + module.fail_json(msg = "Error in adding interface to router: %s" % e.message) + return True + +def _remove_interface_router(neutron, module, router_id, subnet_id): + kwargs = { + 'subnet_id': subnet_id + } + try: + neutron.remove_interface_router(router_id, kwargs) + except Exception as e: + module.fail_json(msg="Error in removing interface from router: %s" % e.message) + return True + +def main(): + module = AnsibleModule( + argument_spec = dict( + login_username = dict(default='admin'), + login_password = dict(required=True), + login_tenant_name = dict(required='True'), + auth_url = dict(default='http://127.0.0.1:35357/v2.0/'), + region_name = dict(default=None), + router_name = dict(required=True), + subnet_name = dict(required=True), + tenant_name = dict(default=None), + state = dict(default='present', choices=['absent', 'present']), + ), + ) + + neutron = _get_neutron_client(module, module.params) + _set_tenant_id(module) + + router_id = _get_router_id(module, neutron) + if not router_id: + module.fail_json(msg="failed to get the router id, please check the router name") + + subnet_id = _get_subnet_id(module, neutron) + if not subnet_id: + module.fail_json(msg="failed to get the subnet id, please check the subnet name") + + if module.params['state'] == 'present': + port_id = _get_port_id(neutron, module, router_id, subnet_id) + if not port_id: + _add_interface_router(neutron, module, router_id, subnet_id) + module.exit_json(changed=True, result="created", id=port_id) + module.exit_json(changed=False, result="success", id=port_id) + + if module.params['state'] == 'absent': + port_id = _get_port_id(neutron, module, router_id, subnet_id) + if not port_id: + module.exit_json(changed = False, result = "Success") + _remove_interface_router(neutron, module, router_id, subnet_id) + module.exit_json(changed=True, result="Deleted") + +# this is magic, see lib/ansible/module.params['common.py +from ansible.module_utils.basic import * +main() diff --git a/tools/ansible-openstack/openstack-ansible-modules/neutron_subnet b/tools/ansible-openstack/openstack-ansible-modules/neutron_subnet new file mode 100644 index 0000000..8cb622b --- /dev/null +++ b/tools/ansible-openstack/openstack-ansible-modules/neutron_subnet @@ -0,0 +1,294 @@ +#!/usr/bin/python +#coding: utf-8 -*- + +# (c) 2013, Benno Joy +# +# This module is free software: you can 
redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +try: + from neutronclient.neutron import client + from keystoneclient.v2_0 import client as ksclient +except ImportError: + print("failed=True msg='neutron and keystone client are required'") + +DOCUMENTATION = ''' +--- +module: neutron_subnet +version_added: "1.2" +short_description: Add/Remove subnet from a network +description: + - Add or Remove a subnet from a network in OpenStack +options: + login_username: + description: + - login username to authenticate to keystone + required: true + default: admin + login_password: + description: + - Password of login user + required: true + default: True + login_tenant_name: + description: + - The tenant name of the login user + required: true + default: True + auth_url: + description: + - The keystone URL for authentication + required: false + default: 'http://127.0.0.1:35357/v2.0/' + region_name: + description: + - Name of the region + required: false + default: None + state: + description: + - Indicate desired state of the resource + choices: ['present', 'absent'] + default: present + network_name: + description: + - Name of the network to which the subnet should be attached + required: true + default: None + cidr: + description: + - The CIDR that should be assigned to the subnet + required: true + default: None + tenant_name: + description: + - The name of the tenant for whom the subnet should be created + required: false + default: None + ip_version: + description: + - The IP version of the subnet, 4 or 6 + required: false + default: 4 + enable_dhcp: + description: + - Whether DHCP should be enabled for this subnet. + required: false + default: true + gateway_ip: + description: + - The ip that would be assigned to the gateway for this subnet + required: false + default: None + dns_nameservers: + description: + - DNS nameservers for this subnet, comma-separated + required: false + default: None + allocation_pool_start: + description: + - From the subnet pool the starting address from which the IP should be allocated + required: false + default: None + allocation_pool_end: + description: + - From the subnet pool the last IP that should be assigned to the virtual machines + required: false + default: None + host_routes: + description: + - Host routes for this subnet, list of dictionaries, e.g.
+'''
+
+_os_keystone = None
+_os_tenant_id = None
+_os_network_id = None
+
+
+def _get_ksclient(module, kwargs):
+    try:
+        kclient = ksclient.Client(username=kwargs.get('login_username'),
+                                  password=kwargs.get('login_password'),
+                                  tenant_name=kwargs.get('login_tenant_name'),
+                                  auth_url=kwargs.get('auth_url'))
+    except Exception as e:
+        module.fail_json(msg="Error authenticating to keystone: %s" % e.message)
+    global _os_keystone
+    _os_keystone = kclient
+    return kclient
+
+
+def _get_endpoint(module, ksclient):
+    try:
+        endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
+    except Exception as e:
+        module.fail_json(msg="Error getting endpoint for the network service: %s" % e.message)
+    return endpoint
+
+
+def _get_neutron_client(module, kwargs):
+    _ksclient = _get_ksclient(module, kwargs)
+    token = _ksclient.auth_token
+    endpoint = _get_endpoint(module, _ksclient)
+    kwargs = {
+        'token': token,
+        'endpoint_url': endpoint
+    }
+    try:
+        neutron = client.Client('2.0', **kwargs)
+    except Exception as e:
+        module.fail_json(msg="Error in connecting to Neutron: %s" % e.message)
+    return neutron
+
+
+def _set_tenant_id(module):
+    global _os_tenant_id
+    if not module.params['tenant_name']:
+        tenant_name = module.params['login_tenant_name']
+    else:
+        tenant_name = module.params['tenant_name']
+
+    for tenant in _os_keystone.tenants.list():
+        if tenant.name == tenant_name:
+            _os_tenant_id = tenant.id
+            break
+    if not _os_tenant_id:
+        module.fail_json(msg="The tenant id cannot be found, please check the parameters")
+
+
+def _get_net_id(neutron, module):
+    kwargs = {
+        'tenant_id': _os_tenant_id,
+        'name': module.params['network_name'],
+    }
+    try:
+        networks = neutron.list_networks(**kwargs)
+    except Exception as e:
+        module.fail_json(msg="Error in listing Neutron networks: %s" % e.message)
+    if not networks['networks']:
+        return None
+    return networks['networks'][0]['id']
+
+
+def _get_subnet_id(module, neutron):
+    global _os_network_id
+    subnet_id = None
+    _os_network_id = _get_net_id(neutron, module)
+    if not _os_network_id:
+        module.fail_json(msg="network id for the given network_name not found, please check the network name")
+    else:
+        kwargs = {
+            'tenant_id': _os_tenant_id,
+            'name': module.params['name'],
+        }
+        try:
+            subnets = neutron.list_subnets(**kwargs)
+        except Exception as e:
+            module.fail_json(msg="Error in getting the subnet list: %s" % e.message)
+        if not subnets['subnets']:
+            return None
+        return subnets['subnets'][0]['id']
+
+
+def _create_subnet(module, neutron):
+    neutron.format = 'json'
+    subnet = {
+        'name': module.params['name'],
+        'ip_version': module.params['ip_version'],
+        'enable_dhcp': module.params['enable_dhcp'],
+        'tenant_id': _os_tenant_id,
+        'gateway_ip': module.params['gateway_ip'],
+        'dns_nameservers': module.params['dns_nameservers'],
+        'network_id': _os_network_id,
+        'cidr': module.params['cidr'],
+        'host_routes': module.params['host_routes'],
+    }
+    if module.params['allocation_pool_start'] and module.params['allocation_pool_end']:
+        allocation_pools = [
+            {
+                'start': module.params['allocation_pool_start'],
+                'end': module.params['allocation_pool_end']
+            }
+        ]
+        subnet.update({'allocation_pools': allocation_pools})
+    if not module.params['gateway_ip']:
+        subnet.pop('gateway_ip')
+    if module.params['dns_nameservers']:
+        subnet['dns_nameservers'] = module.params['dns_nameservers'].split(',')
+    else:
+        subnet.pop('dns_nameservers')
+    if not module.params['host_routes']:
+        subnet.pop('host_routes')
+    try:
+        new_subnet = neutron.create_subnet(dict(subnet=subnet))
+    except Exception as e:
+        module.fail_json(msg="Failure in creating subnet: %s" % e.message)
+    return new_subnet['subnet']['id']
+
+
+def _delete_subnet(module, neutron, subnet_id):
+    try:
+        neutron.delete_subnet(subnet_id)
+    except Exception as e:
+        module.fail_json(msg="Error in deleting subnet: %s" % e.message)
+    return True
+
+
+def main():
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            login_username=dict(default='admin'),
+            login_password=dict(required=True),
+            login_tenant_name=dict(required=True),
+            auth_url=dict(default='http://127.0.0.1:35357/v2.0/'),
+            region_name=dict(default=None),
+            name=dict(required=True),
+            network_name=dict(required=True),
+            cidr=dict(required=True),
+            tenant_name=dict(default=None),
+            state=dict(default='present', choices=['absent', 'present']),
+            ip_version=dict(default='4', choices=['4', '6']),
+            enable_dhcp=dict(default='true', choices=BOOLEANS),
+            gateway_ip=dict(default=None),
+            dns_nameservers=dict(default=None),
+            allocation_pool_start=dict(default=None),
+            allocation_pool_end=dict(default=None),
+            host_routes=dict(default=None),
+        ),
+    )
+    neutron = _get_neutron_client(module, module.params)
+    _set_tenant_id(module)
+    if module.params['state'] == 'present':
+        subnet_id = _get_subnet_id(module, neutron)
+        if not subnet_id:
+            subnet_id = _create_subnet(module, neutron)
+            module.exit_json(changed=True, result="Created", id=subnet_id)
+        else:
+            module.exit_json(changed=False, result="success", id=subnet_id)
+    else:
+        subnet_id = _get_subnet_id(module, neutron)
+        if not subnet_id:
+            module.exit_json(changed=False, result="success")
+        else:
+            _delete_subnet(module, neutron, subnet_id)
+            module.exit_json(changed=True, result="deleted")
+
+
+# this is magic, see lib/ansible/module_common.py
+from ansible.module_utils.basic import *
+main()
diff --git a/tools/ansible-openstack/openstack-ansible-modules/nova_manage b/tools/ansible-openstack/openstack-ansible-modules/nova_manage
new file mode 100644
index 0000000..6b1a297
--- /dev/null
+++ b/tools/ansible-openstack/openstack-ansible-modules/nova_manage
@@ -0,0 +1,93 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+DOCUMENTATION = '''
+---
+module: nova_manage
+short_description: Initialize OpenStack Compute (nova) database
+description: Create the tables for the database backend used by nova
+options:
+  action:
+    description:
+      - action to perform. Currently only dbsync is supported
+    required: true
+requirements: [ nova ]
+'''
+
+EXAMPLES = '''
+nova_manage: action=dbsync
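+
+# The module supports check mode; a dry run only reports whether the database
+# schema version would change (hypothetical ad-hoc invocation):
+#   ansible controller -m nova_manage -a "action=dbsync" --check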
+'''
+
+import subprocess
+
+try:
+    from nova.db.sqlalchemy import migration
+    from nova import config
+except ImportError:
+    nova_found = False
+else:
+    nova_found = True
+
+
+def load_config_file():
+    config.parse_args([])
+
+
+def will_db_change():
+    """ Check if the database version will change after the sync.
+ + """ + # Load the config file options + current_version = migration.db_version() + repository = migration._find_migrate_repo() + repo_version = repository.latest + return current_version != repo_version + + +def do_dbsync(): + """Do the dbsync. Returns (returncode, stdout, stderr)""" + # We call nova-manage db_sync on the shell rather than trying to + # do this in Python since we have no guarantees about changes to the + # internals. + args = ['nova-manage', 'db', 'sync'] + + call = subprocess.Popen(args, shell=False, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = call.communicate() + return (call.returncode, out, err) + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + action=dict(required=True), + ), + supports_check_mode=True + ) + + if not nova_found: + module.fail_json(msg="nova package could not be found") + + action = module.params['action'] + + if action not in ['dbsync', 'db_sync']: + module.fail_json(msg="Only supported action is 'dbsync'") + + load_config_file() + + changed = will_db_change() + if module.check_mode: + module.exit_json(changed=changed) + + (res, stdout, stderr) = do_dbsync() + + if res == 0: + module.exit_json(changed=changed, stdout=stdout, stderr=stderr) + else: + module.fail_json(msg="nova-manage returned non-zero value: %d" % res, + stdout=stdout, stderr=stderr) + +# this is magic, see lib/ansible/module_common.py +#<> +main() diff --git a/tools/ansible-openstack/openstack_hosts b/tools/ansible-openstack/openstack_hosts new file mode 100644 index 0000000..d72893b --- /dev/null +++ b/tools/ansible-openstack/openstack_hosts @@ -0,0 +1,24 @@ +[frontend] +testsrv03 + +[controller] +testsrv03 + +[sql_backend] +testsrv03 + +[amqp_backend] +testsrv03 + +[volume_backend] +testsrv03 + +[network_gateway] +testsrv03 + +[compute_backend] +testsrv04 +testsrv06 +testsrv07 +testsrv08 + diff --git a/tools/ansible-openstack/playbooks/cinder/controller.yml b/tools/ansible-openstack/playbooks/cinder/controller.yml new file mode 100644 index 0000000..72b8275 --- /dev/null +++ b/tools/ansible-openstack/playbooks/cinder/controller.yml @@ -0,0 +1,42 @@ +--- +- name: OpenStack Block Storage (Controller) + hosts: controller + sudo: yes + + tasks: + - name: ensure cinder-related packages are installed + yum: name={{ item }} state=latest + with_items: + - openstack-cinder + - openstack-utils + - openstack-selinux + - MySQL-python + notify: + - restart cinder services + + - name: ensure cinder.conf file is configured + template: > + src=templates/etc/cinder/cinder.conf + dest=/etc/cinder/cinder.conf + owner=root group=cinder mode=0640 backup=no + notify: + - restart cinder services + + + + - name: ensure cinder automatically boots after reboot + service: name={{ item }} state=started enabled=yes + with_items: + - openstack-cinder-scheduler + + handlers: + + - name: restart cinder services + service: name={{ item }} state=restarted + with_items: + - openstack-cinder-scheduler + + + + + \ No newline at end of file diff --git a/tools/ansible-openstack/playbooks/cinder/frontend.yml b/tools/ansible-openstack/playbooks/cinder/frontend.yml new file mode 100644 index 0000000..66145ac --- /dev/null +++ b/tools/ansible-openstack/playbooks/cinder/frontend.yml @@ -0,0 +1,46 @@ +--- +- name: OpenStack Block Storage (Frontend) + hosts: frontend + sudo: yes + + tasks: + + - name: ensure cinder-related packages are installed + yum: name={{item}} state=latest + with_items: + - openstack-cinder + - openstack-utils + - openstack-selinux + - MySQL-python + notify: + - 
restart cinder services + + - name: ensure cinder.conf file is configured + template: > + src=templates/etc/cinder/cinder.conf + dest=/etc/cinder/cinder.conf + owner=root group=cinder mode=0640 backup=no + notify: + - restart cinder services + + - name: ensure cinder port 8776 is opened + include: ../reusables/open_firewall_port.yml protocol=tcp port=8776 + + + # cinder_manage looks for flags.py which is no longer used? + - name: ensure cinder database is synchronized + command: su cinder -s /bin/sh -c "cinder-manage db sync" + notify: restart cinder services + + + - name: ensure cinder automatically boots after reboot + service: name={{ item }} state=started enabled=yes + with_items: + - openstack-cinder-api + + handlers: + - include: ../reusables/handlers.yml + - name: restart cinder services + service: name={{ item }} state=restarted + with_items: + - openstack-cinder-api diff --git a/tools/ansible-openstack/playbooks/cinder/keystone.yml b/tools/ansible-openstack/playbooks/cinder/keystone.yml new file mode 100644 index 0000000..d43d6ef --- /dev/null +++ b/tools/ansible-openstack/playbooks/cinder/keystone.yml @@ -0,0 +1,47 @@ +--- +- name: make sure cinder is in keystone + hosts: frontend[0] + sudo: False + gather_facts: True + tasks: + + - name: create cinder service user + keystone_user: + endpoint: "{{ keystone_admin_url }}" + token: "{{ admin_token }}" + tenant: "{{ service_tenant }}" + user: cinder + password: "{{ cinder_identity_password }}" + + - name: add cinder service user to the service tenant with the admin role + keystone_user: + endpoint: "{{ keystone_admin_url }}" + token: "{{ admin_token }}" + tenant: "{{ service_tenant }}" + user: cinder + role: admin + + - name: add cinder endpoint to keystone + keystone_service: + endpoint: "{{ keystone_admin_url }}" + token: "{{ admin_token }}" + region: "{{ openstack_region }}" + name: cinder + type: volume + description: "Volume Service" + public_url: "{{ cinder_public_url }}" + internal_url: "{{ cinder_internal_url }}" + admin_url: "{{ cinder_admin_url }}" + + - name: add cinder endpoint to keystone + keystone_service: + endpoint: "{{ keystone_admin_url }}" + token: "{{ admin_token }}" + region: "{{ openstack_region }}" + name: cinderv2 + type: volumev2 + description: "Volume Service V2" + public_url: "{{ cinderv2_public_url }}" + internal_url: "{{ cinderv2_internal_url }}" + admin_url: "{{ cinderv2_admin_url }}" + diff --git a/tools/ansible-openstack/playbooks/cinder/library b/tools/ansible-openstack/playbooks/cinder/library new file mode 100644 index 0000000..ad514cb --- /dev/null +++ b/tools/ansible-openstack/playbooks/cinder/library @@ -0,0 +1 @@ +../../openstack-ansible-modules/ \ No newline at end of file diff --git a/tools/ansible-openstack/playbooks/cinder/main.yml b/tools/ansible-openstack/playbooks/cinder/main.yml new file mode 100644 index 0000000..61a7b0f --- /dev/null +++ b/tools/ansible-openstack/playbooks/cinder/main.yml @@ -0,0 +1,5 @@ +- include: mysql.yml +- include: keystone.yml +- include: frontend.yml +- include: controller.yml +- include: volume.yml diff --git a/tools/ansible-openstack/playbooks/cinder/mysql.yml b/tools/ansible-openstack/playbooks/cinder/mysql.yml new file mode 100644 index 0000000..6d1da5a --- /dev/null +++ b/tools/ansible-openstack/playbooks/cinder/mysql.yml @@ -0,0 +1,20 @@ +--- +- name: Install Cinder database + hosts: sql_backend[0] + sudo: True + tasks: + + - name: ensure cinder database is present + mysql_db: + name: cinder + encoding: utf8 + + - name: ensure cinder database user is 
present + mysql_user: + name: cinder + host: "{{ item }}" + password: "{{ cinder_db_password }}" + priv: cinder.*:ALL + with_items: + - "%" + - localhost diff --git a/tools/ansible-openstack/playbooks/cinder/templates b/tools/ansible-openstack/playbooks/cinder/templates new file mode 100644 index 0000000..7cb455a --- /dev/null +++ b/tools/ansible-openstack/playbooks/cinder/templates @@ -0,0 +1 @@ +../../templates/ \ No newline at end of file diff --git a/tools/ansible-openstack/playbooks/cinder/volume.yml b/tools/ansible-openstack/playbooks/cinder/volume.yml new file mode 100644 index 0000000..f290032 --- /dev/null +++ b/tools/ansible-openstack/playbooks/cinder/volume.yml @@ -0,0 +1,61 @@ +--- +- name: OpenStack Block Storage (Backend) + hosts: volume_backend + sudo: yes + + tasks: + + - name: ensure cinder-related packages are installed + yum: name={{item}} state=latest + with_items: + - openstack-cinder + - openstack-utils + - openstack-selinux + - MySQL-python + notify: + - restart cinder services + + - name: ensure cinder.conf file is configured + template: > + src=templates/etc/cinder/cinder.conf + dest=/etc/cinder/cinder.conf + owner=root group=cinder mode=0640 backup=no + notify: + - restart cinder services + + + + - name: ensure cinder volume group is created + lvg: + vg: "{{ cinder_volume }}" + pvs: "{{ cinder_volume_dev }}" + state: present + vg_options: "" + notify: + - restart cinder services + + - name: ensure tgtd.conf is configured + lineinfile: > + dest=/etc/tgt/targets.conf + state=present + line="include /etc/cinder/volumes/*" + + - name: ensure cinder and tgtd automatically boots after reboot + service: name={{ item }} state=started enabled=yes + with_items: + - openstack-cinder-volume + - tgtd + + - name: ensure iscsi port 3260 is opened + include: ../reusables/open_firewall_port.yml protocol=tcp port=3260 + + + handlers: + - include: ../reusables/handlers.yml + - name: restart cinder services + service: name={{item}} state=restarted + with_items: + - openstack-cinder-volume + - tgtd + + \ No newline at end of file diff --git a/tools/ansible-openstack/playbooks/common/common.yml b/tools/ansible-openstack/playbooks/common/common.yml new file mode 100644 index 0000000..0fbe425 --- /dev/null +++ b/tools/ansible-openstack/playbooks/common/common.yml @@ -0,0 +1,71 @@ +--- +- name: System settings for all OpenStack nodes + hosts: all + sudo: yes + + tasks: + - name: ensure python SELinux library is installed + yum: name={{ item }} state=latest + with_items: + - libselinux-python + + - name: ensure SELinux is congirured permissive + selinux: > + policy=targeted + state=permissive + + - name: ensure epel and rdo-release repository are installed + yum: name={{ item }} state=present + with_items: + - "http://ftp.riken.jp/Linux/fedora/epel/6/x86_64/epel-release-6-8.noarch.rpm" + - "http://repos.fedorapeople.org/repos/openstack/openstack-icehouse/rdo-release-icehouse-3.noarch.rpm" + when: not use_your_own_repository + + - name: ensure local repository file is updated(if any) + copy: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + owner: root + group: root + mode: 0644 + with_items: + - { src: templates/etc/yum.repos.d/CentOS-Base.repo, dest: /etc/yum.repos.d/CentOS-Base.repo } + - { src: templates/etc/yum.repos.d/epel.repo, dest: /etc/yum.repos.d/epel.repo } + - { src: templates/etc/yum.repos.d/epel-testing.repo, dest: /etc/yum.repos.d/epel-testing.repo } + - { src: templates/etc/yum.repos.d/rdo-release.repo, dest: /etc/yum.repos.d/rdo-release.repo } + when: 
use_your_own_repository + + - name: yum clean all + command: /usr/bin/yum clean all + + - name: ensure python keyczar is installed + yum: name={{ item }} state=latest + with_items: + - python-keyczar + + - name: ensure python-keyczar port 5099 is opened + include: ../reusables/open_firewall_port.yml protocol=tcp port=5099 + + - name: ensure additional packages are installed + yum: name={{ item }} state=latest + with_items: + - bash-completion + - openstack-utils + - iptables + - openstack-selinux + + - name: make sure iptables is automatically booted + service: name=iptables state=started enabled=yes + + - name: "Build hosts file for OpenStack nodes" + lineinfile: dest=/etc/hosts regexp='.*{{hostvars[item].ansible_fqdn}}$' line="{{ hostvars[item].my_int_ip }} {{item}} {{hostvars[item].ansible_fqdn}}" state=present + when: hostvars[item].ansible_default_ipv4.address is defined + with_items: groups['all'] + + - name: ensure all packages are updated (yum update) + yum: name=* state=latest + + + + handlers: + - include: ../reusables/handlers.yml diff --git a/tools/ansible-openstack/playbooks/common/common_kernelParameters.yml b/tools/ansible-openstack/playbooks/common/common_kernelParameters.yml new file mode 100644 index 0000000..4f2e2ca --- /dev/null +++ b/tools/ansible-openstack/playbooks/common/common_kernelParameters.yml @@ -0,0 +1,45 @@ +--- +- name: edit kernel parameters for COMPUTE and NW + hosts: compute_backend:network_gateway + sudo: yes + + tasks: + - name: to make sure net.bridge.bridge-nf-call-* to be activated after reboot + copy: > + src=templates/etc/sysconfig/bridge.modules + dest=/etc/sysconfig/modules + owner=root + group=root + mode=0755 + + - name: manually load bridge module + modprobe: name=bridge state=present + + - name: set all.rp_filter zero + sysctl: name=net.ipv4.conf.all.rp_filter value=0 state=present reload=yes sysctl_set=yes + + - name: set default.rp_filter zero + sysctl: name=net.ipv4.conf.default.rp_filter value=0 state=present reload=yes sysctl_set=yes + + - name: set net.bridge.bridge-nf-call-ip6tables one + sysctl: name=net.bridge.bridge-nf-call-ip6tables value=1 state=present reload=yes sysctl_set=yes + + - name: set net.bridge.bridge-nf-call-iptables one + sysctl: name=net.bridge.bridge-nf-call-iptables value=1 state=present reload=yes sysctl_set=yes + + - name: set net.bridge.bridge-nf-call-arptables one + sysctl: name=net.bridge.bridge-nf-call-arptables value=1 state=present reload=yes sysctl_set=yes + + +- name: edit kernel parameters exclusively for NW + hosts: network_gateway + sudo: yes + + tasks: + - name: set ip_forward one + sysctl: name=net.ipv4.ip_forward value=1 state=present reload=yes sysctl_set=yes + + + + + diff --git a/tools/ansible-openstack/playbooks/common/common_sysconfig_VLAN.yml b/tools/ansible-openstack/playbooks/common/common_sysconfig_VLAN.yml new file mode 100644 index 0000000..3fbf0bb --- /dev/null +++ b/tools/ansible-openstack/playbooks/common/common_sysconfig_VLAN.yml @@ -0,0 +1,15 @@ +--- +- name: edit sysconfig/network for VLAN + hosts: compute_backend:network_gateway + sudo: yes + + tasks: + - name: "enable VLAN" + lineinfile: dest=/etc/sysconfig/network regexp='^VLAN=.*' line="VLAN=yes" state=present + + - name: "set VLAN type" + lineinfile: dest=/etc/sysconfig/network regexp='^VLAN_NAME_TYPE=.*' line="VLAN_NAME_TYPE=DEV_PLUS_VID_NO_PAD" state=present + + - name: "manually load module" + modprobe: name=8021q state=present + diff --git a/tools/ansible-openstack/playbooks/common/main.yml 
b/tools/ansible-openstack/playbooks/common/main.yml new file mode 100644 index 0000000..d897815 --- /dev/null +++ b/tools/ansible-openstack/playbooks/common/main.yml @@ -0,0 +1,7 @@ +- include: common.yml +- include: common_sysconfig_VLAN.yml +- include: common_kernelParameters.yml +- include: yum_iproute.yml +- include: yum_KVM.yml +- include: yum_kernel.yml + diff --git a/tools/ansible-openstack/playbooks/common/templates b/tools/ansible-openstack/playbooks/common/templates new file mode 100644 index 0000000..7cb455a --- /dev/null +++ b/tools/ansible-openstack/playbooks/common/templates @@ -0,0 +1 @@ +../../templates/ \ No newline at end of file diff --git a/tools/ansible-openstack/playbooks/common/yum_KVM.yml b/tools/ansible-openstack/playbooks/common/yum_KVM.yml new file mode 100644 index 0000000..4a2d6c8 --- /dev/null +++ b/tools/ansible-openstack/playbooks/common/yum_KVM.yml @@ -0,0 +1,37 @@ +--- +- name: Update packages exclusivelly for COMPUTE nodes + hosts: compute_backend + sudo: yes + + tasks: + - name: ensure virsh packages are installed + yum: name={{ item.package }} state={{ item.state }} + with_items: + - { package: '@virtualization-platform', state: latest } + - { package: '@virtualization-tools', state: latest} + notify: + - restart libvirtd + + - name: ensure libvirtd boots automatically after reboot + service: name=libvirtd state=started enabled=yes + + + - name: ensure vibr0 is stopped + command: /usr/bin/virsh net-destroy default + ignore_errors: True + notify: + - restart libvirtd + + - name: ensure vibr0 is disabled + command: /usr/bin/virsh net-autostart default --disable + ignore_errors: True + notify: + - restart libvirtd + + + handlers: + - name: restart libvirtd + service: name=libvirtd state=restarted + + + diff --git a/tools/ansible-openstack/playbooks/common/yum_iproute.yml b/tools/ansible-openstack/playbooks/common/yum_iproute.yml new file mode 100644 index 0000000..2ad67dd --- /dev/null +++ b/tools/ansible-openstack/playbooks/common/yum_iproute.yml @@ -0,0 +1,13 @@ +--- +- name: update network namespace package + hosts: network_gateway + sudo: yes + + tasks: + - name: ensure lates network namespace package + yum: name={{ item }} state=latest + with_items: + - iproute + + - name: ensure lates network namespace package + yum: name={{ latest_dnsmasq }} state=present diff --git a/tools/ansible-openstack/playbooks/common/yum_kernel.yml b/tools/ansible-openstack/playbooks/common/yum_kernel.yml new file mode 100644 index 0000000..b8344c1 --- /dev/null +++ b/tools/ansible-openstack/playbooks/common/yum_kernel.yml @@ -0,0 +1,18 @@ +--- +- name: update kernel for Network, Compute + hosts: network_gateway:compute_backend + sudo: yes + + tasks: + - name: ensure openstack kernel is installed + yum: name={{ openstack_kernel }} state=latest + notify: reboot host + + + handlers: + - name: reboot host + shell: sleep 2s && /sbin/reboot & + notify: wait for the server to come up(max 10min) + + - name: wait for the server to come up(max 10min) + local_action: wait_for host={{ inventory_hostname }} port=22 delay=30 timeout=600 diff --git a/tools/ansible-openstack/playbooks/glance/controller.yml b/tools/ansible-openstack/playbooks/glance/controller.yml new file mode 100644 index 0000000..9ace567 --- /dev/null +++ b/tools/ansible-openstack/playbooks/glance/controller.yml @@ -0,0 +1,42 @@ +--- +- name: OpenStack Image Service (Controller) + hosts: controller + sudo: yes + + tasks: + - name: ensure glance is installed + yum: name={{ item }} state=latest + with_items: + - 
openstack-glance + - openstack-utils + - openstack-selinux + - sheepdog + - MySQL-python + notify: + - restart glance-registry + + - name: ensure glance-registry.conf file file is configured + template: > + src=templates/etc/glance/glance-registry.conf + dest=/etc/glance/glance-registry.conf + owner=root + group=glance + mode=0640 + backup=no + notify: + - restart glance-registry + + - name: ensure glance-registry port is opened. + include: ../reusables/open_firewall_port.yml protocol=tcp port=9191 + + - name: ensure glance-registry automatically boots after reboot + service: name=openstack-glance-registry state=started enabled=yes + + + handlers: + - include: ../reusables/handlers.yml + + - name: restart glance-registry + service: name=openstack-glance-registry state=restarted + + \ No newline at end of file diff --git a/tools/ansible-openstack/playbooks/glance/fixture.yml b/tools/ansible-openstack/playbooks/glance/fixture.yml new file mode 100644 index 0000000..fa7d119 --- /dev/null +++ b/tools/ansible-openstack/playbooks/glance/fixture.yml @@ -0,0 +1,19 @@ +--- +- name: OpenStack Image Service (Fixture(s)) + hosts: frontend[0] + sudo: yes + + tasks: + - name: ensure glance image is registered + glance_image: + auth_url: "{{ keystone_public_url }}" + copy_from: "{{ item.url }}" + disk_format: "{{ item.disk_format }}" + is_public: yes + login_username: "{{ admin_user }}" + login_password: "{{ admin_password }}" + login_tenant_name: "{{ admin_tenant }}" + name: "{{ item.name}}" + region_name: "{{ openstack_region }}" + with_items: glance_images + diff --git a/tools/ansible-openstack/playbooks/glance/frontend.yml b/tools/ansible-openstack/playbooks/glance/frontend.yml new file mode 100644 index 0000000..0396481 --- /dev/null +++ b/tools/ansible-openstack/playbooks/glance/frontend.yml @@ -0,0 +1,54 @@ +--- +- name: OpenStack Image Serivce (Frontend) + hosts: frontend + sudo: yes + + + tasks: + - name: ensure glance is installed + yum: name={{ item }} state=latest + with_items: + - openstack-glance + - openstack-utils + - openstack-selinux + - sheepdog + - MySQL-python + notify: + - restart glance-api + + + - name: ensure glance-api.conf file is configured + template: > + src=templates/etc/glance/glance-api.conf + dest=/etc/glance/glance-api.conf + owner=root + group=glance + mode=0640 + backup=no + notify: + - restart glance-api + + # glance_manager cannot be used due to the lack of glance.db.sqlalchemy.migration.py + - name: ensure glance database is synchronized + command: su glance -s /bin/sh -c "glance-manage --debug db_sync" + #glance_manage: action=dbsync + notify: + - restart glance-api + + - name: ensure glance-api port 9292 is opened + include: ../reusables/open_firewall_port.yml protocol=tcp port=9292 + + - name: ensure glance-api automatically boots after reboot + service: name=openstack-glance-api state=started enabled=yes + + + + handlers: + - include: ../reusables/handlers.yml + + - name: restart glance-api + service: name=openstack-glance-api state=restarted + + + + \ No newline at end of file diff --git a/tools/ansible-openstack/playbooks/glance/keystone.yml b/tools/ansible-openstack/playbooks/glance/keystone.yml new file mode 100644 index 0000000..4013527 --- /dev/null +++ b/tools/ansible-openstack/playbooks/glance/keystone.yml @@ -0,0 +1,35 @@ +--- +- name: make sure glance is in keystone + hosts: frontend[0] + gather_facts: True + sudo: False + tasks: + + - name: create glance user in keystone + keystone_user: + endpoint: "{{ keystone_admin_url }}" + token: "{{ admin_token 
}}" + tenant: "{{ service_tenant }}" + user: glance + password: "{{ glance_identity_password }}" + + - name: add glance user to the service tenant with the admin role + keystone_user: + endpoint: "{{ keystone_admin_url }}" + token: "{{ admin_token }}" + tenant: "{{ service_tenant }}" + user: glance + role: admin + + - name: add glance endpoint + keystone_service: + endpoint: "{{ keystone_admin_url }}" + token: "{{ admin_token }}" + region: "{{ openstack_region }}" + name: glance + type: image + description: "Image Service" + public_url: "{{ glance_public_url }}" + internal_url: "{{ glance_internal_url }}" + admin_url: "{{ glance_admin_url }}" + diff --git a/tools/ansible-openstack/playbooks/glance/library b/tools/ansible-openstack/playbooks/glance/library new file mode 100644 index 0000000..ad514cb --- /dev/null +++ b/tools/ansible-openstack/playbooks/glance/library @@ -0,0 +1 @@ +../../openstack-ansible-modules/ \ No newline at end of file diff --git a/tools/ansible-openstack/playbooks/glance/main.yml b/tools/ansible-openstack/playbooks/glance/main.yml new file mode 100644 index 0000000..074b5de --- /dev/null +++ b/tools/ansible-openstack/playbooks/glance/main.yml @@ -0,0 +1,6 @@ +--- +- include: mysql.yml +- include: keystone.yml +- include: frontend.yml +- include: controller.yml +- include: fixture.yml diff --git a/tools/ansible-openstack/playbooks/glance/mysql.yml b/tools/ansible-openstack/playbooks/glance/mysql.yml new file mode 100644 index 0000000..5ee4cde --- /dev/null +++ b/tools/ansible-openstack/playbooks/glance/mysql.yml @@ -0,0 +1,21 @@ +--- +- name: OpenStack Identity Database + hosts: sql_backend[0] + sudo: True + gather_facts: True + tasks: + + - name: ensure glance database is present + mysql_db: + name: glance + encoding: utf8 + + - name: ensure glance database user is present + mysql_user: + name: glance + host: "{{ item }}" + password: "{{ glance_db_password }}" + priv: glance.*:ALL + with_items: + - "%" + - localhost diff --git a/tools/ansible-openstack/playbooks/glance/templates b/tools/ansible-openstack/playbooks/glance/templates new file mode 100644 index 0000000..7cb455a --- /dev/null +++ b/tools/ansible-openstack/playbooks/glance/templates @@ -0,0 +1 @@ +../../templates/ \ No newline at end of file diff --git a/tools/ansible-openstack/playbooks/horizon/main.yml b/tools/ansible-openstack/playbooks/horizon/main.yml new file mode 100644 index 0000000..af86b0a --- /dev/null +++ b/tools/ansible-openstack/playbooks/horizon/main.yml @@ -0,0 +1,55 @@ +--- +- name: OpenStack Dashboard + hosts: frontend + sudo: yes + + tasks: + - name: ensure horizon packages are installed + yum: name={{ item }} state=latest + with_items: + - mod_wsgi + - httpd + - mod_ssl + - memcached + - python-memcached + - openstack-dashboard + notify: + - ensure services are restarted + + - name: ensure local_settings file is configured + template: > + src=templates/etc/openstack-dashboard/local_settings + dest=/etc/openstack-dashboard/local_settings + owner=root + group=root + mode=0644 + backup=yes + notify: ensure services are restarted + + - name: ensure httpd.conf is configured + lineinfile: > + dest=/etc/httpd/conf/httpd.conf + state=present + line="ServerName {{ ansible_hostname }}:80" + insertafter='#ServerName www.example.com:80' + notify: + - ensure services are restarted + + - name: ensure port 80 is opened + include: ../reusables/open_firewall_port.yml protocol=tcp port=80 + - name: ensure port 443 is opened + include: ../reusables/open_firewall_port.yml protocol=tcp port=443 + + - name: 
ensure httpd services automatically boots after reboot + service: name={{item}} state=started enabled=yes + with_items: + - httpd + + handlers: + - include: ../reusables/handlers.yml + + - name: ensure services are restarted + service: name={{ item }} state=restarted + with_items: + - httpd + - memcached diff --git a/tools/ansible-openstack/playbooks/horizon/templates b/tools/ansible-openstack/playbooks/horizon/templates new file mode 100644 index 0000000..7cb455a --- /dev/null +++ b/tools/ansible-openstack/playbooks/horizon/templates @@ -0,0 +1 @@ +../../templates/ \ No newline at end of file diff --git a/tools/ansible-openstack/playbooks/keystone/fixture.yml b/tools/ansible-openstack/playbooks/keystone/fixture.yml new file mode 100644 index 0000000..98f9ccc --- /dev/null +++ b/tools/ansible-openstack/playbooks/keystone/fixture.yml @@ -0,0 +1,64 @@ +--- +- name: OpenStack Identity (Fixture(s)) + hosts: frontend[0] + gather_facts: yes + sudo: yes + tasks: + + - name: create service tenant + keystone_user: + endpoint: "{{ keystone_admin_url }}" + token: "{{ admin_token }}" + tenant: "{{ service_tenant }}" + tenant_description: "Service Tenant" + + - name: add keystone endpoint information + keystone_service: + endpoint: "{{ keystone_admin_url }}" + token: "{{ admin_token }}" + region: "{{ openstack_region }}" + name: keystone + type: identity + description: "Identity Service" + public_url: "{{ keystone_public_url }}" + internal_url: "{{ keystone_internal_url }}" + admin_url: "{{ keystone_admin_url }}" + + - name: create admin tenant + keystone_user: + endpoint: "{{ keystone_admin_url }}" + token: "{{ admin_token }}" + tenant: admin + tenant_description: "Admin Tenant" + + - name: create admin user + keystone_user: + endpoint: "{{ keystone_admin_url }}" + token: "{{ admin_token }}" + tenant: admin + user: admin + password: "{{ admin_password }}" + + - name: create admin role and associate it with admin user + keystone_user: + endpoint: "{{ keystone_admin_url }}" + token: "{{ admin_token }}" + tenant: admin + user: admin + role: admin + + - name: create Member role and associate it with admin user + keystone_user: + endpoint: "{{ keystone_admin_url }}" + token: "{{ admin_token }}" + tenant: admin + user: admin + role: Member + + - name: copy keystone rc file + template: src={{ item.src }} dest={{ item.dest }} owner=root mode=0700 + with_items: + - { src: templates/root/keystonerc_admin, dest: /root/keystonerc_admin } + - { src: templates/root/openrc, dest: /root/openrc } + + diff --git a/tools/ansible-openstack/playbooks/keystone/frontend.yml b/tools/ansible-openstack/playbooks/keystone/frontend.yml new file mode 100644 index 0000000..e0cffa6 --- /dev/null +++ b/tools/ansible-openstack/playbooks/keystone/frontend.yml @@ -0,0 +1,89 @@ +--- +- name: workaround https://groups.google.com/forum/#!topic/ansible-project/nhYAFkD20z0 + hosts: sql_backend + sudo: yes + gather_facts: yes + + + +- name: OpenStack Identity Service + hosts: frontend + sudo: yes + + tasks: + - name: ensure keystone package is installed + yum: name={{ item }} state=latest + with_items: + - openstack-keystone + - openstack-utils + - openstack-selinux + - MySQL-python + - memcached + notify: restart keystone + + - name: ensure keystone.conf file is configured + template: + src: templates/etc/keystone/keystone.conf + dest: /etc/keystone/keystone.conf + owner: root + group: keystone + mode: 0640 + notify: restart keystone + + # keystone_manage cannot used due to lack of migration.py + - name: ensure keystone database is 
synchronized + command: su -s /bin/sh -c "/usr/bin/keystone-manage db_sync" keystone + #keystone_manage: action=dbsync + notify: restart keystone + + - name: check cert file if public key exists + action: shell test -e /etc/keystone/ssl/private/signing_key.pem && echo "0" || echo "ng" + register: is_file_created + + - name: ensure public key for keystone is made + command: /usr/bin/keystone-manage pki_setup --keystone-user keystone --keystone-group keystone + when: is_file_created.stdout != "0" + + - name: ensure key files are properly chowned + shell: chown -R keystone:keystone /etc/keystone/ssl + + - name: ensure key files are properly chmodded + shell: chmod -R o-rwx /etc/keystone/ssl + + + - name: ensure keystone log is properly ownered + file: path={{ item }} owner=keystone group=keystone state=file + with_items: + - /var/log/keystone/keystone.log + + - name: ensure keystone port 5000 is opened + include: ../reusables/open_firewall_port.yml protocol=tcp port=5000 + + - name: ensure keystone port 35357 is opened + include: ../reusables/open_firewall_port.yml protocol=tcp port=35357 + + - name: ensure keystone service boots automatically after reboot + service: name=openstack-keystone state=started enabled=yes + + - name: ensure memcached service boots automatically after reboot + service: name=memcached state=started enabled=yes + + + - name: ensure expired keystone token deletion is schedules + cron: + name: "remove expired token" + minute: 1 + user: "keystone" + job: '/usr/bin/keystone-manage token_flush >/dev/null 2>&1' + state: present + + + + handlers: + - include: ../reusables/handlers.yml + + - name: restart keystone + service: name=openstack-keystone state=restarted + + + diff --git a/tools/ansible-openstack/playbooks/keystone/library b/tools/ansible-openstack/playbooks/keystone/library new file mode 100644 index 0000000..ad514cb --- /dev/null +++ b/tools/ansible-openstack/playbooks/keystone/library @@ -0,0 +1 @@ +../../openstack-ansible-modules/ \ No newline at end of file diff --git a/tools/ansible-openstack/playbooks/keystone/main.yml b/tools/ansible-openstack/playbooks/keystone/main.yml new file mode 100644 index 0000000..467f604 --- /dev/null +++ b/tools/ansible-openstack/playbooks/keystone/main.yml @@ -0,0 +1,4 @@ +--- +- include: mysql.yml +- include: frontend.yml +- include: fixture.yml diff --git a/tools/ansible-openstack/playbooks/keystone/mysql.yml b/tools/ansible-openstack/playbooks/keystone/mysql.yml new file mode 100644 index 0000000..0b68c11 --- /dev/null +++ b/tools/ansible-openstack/playbooks/keystone/mysql.yml @@ -0,0 +1,26 @@ +--- +- name: OpenStack Identity Database + hosts: sql_backend[0] + sudo: yes + + + tasks: + + - name: ensure keystone database is present + mysql_db: + name: keystone + encoding: utf8 + + - name: ensure keystone database user is present + mysql_user: + name: keystone + host: "{{ item }}" + password: "{{ keystone_db_password }}" + priv: keystone.*:ALL + with_items: + - '%' + - localhost + + + + \ No newline at end of file diff --git a/tools/ansible-openstack/playbooks/keystone/templates b/tools/ansible-openstack/playbooks/keystone/templates new file mode 100644 index 0000000..7cb455a --- /dev/null +++ b/tools/ansible-openstack/playbooks/keystone/templates @@ -0,0 +1 @@ +../../templates/ \ No newline at end of file diff --git a/tools/ansible-openstack/playbooks/memcached/main.yml b/tools/ansible-openstack/playbooks/memcached/main.yml new file mode 100644 index 0000000..9cfefa4 --- /dev/null +++ 
b/tools/ansible-openstack/playbooks/memcached/main.yml @@ -0,0 +1,33 @@ +--- +- name: Memcached + hosts: controller + sudo: yes + + tasks: + - name: ensure memcached package is installed + yum: name=memcached state=latest + + - name: ensure port 11211 is opened + include: ../reusables/open_firewall_port.yml protocol=tcp port=11211 + + - name: ensure memcached local settings + template: > + src=templates/etc/sysconfig/memcached + dest=/etc/sysconfig/memcached + owner=root + group=root + mode=0644 + notify: restart memcached + + + - name: ensure memcached runs automatically after boot. + service: name=memcached state=started enabled=yes + + + handlers: + - include: ../reusables/handlers.yml + + - name: restart memcached + service: name=memcached state=restarted + + diff --git a/tools/ansible-openstack/playbooks/memcached/templates b/tools/ansible-openstack/playbooks/memcached/templates new file mode 100644 index 0000000..7cb455a --- /dev/null +++ b/tools/ansible-openstack/playbooks/memcached/templates @@ -0,0 +1 @@ +../../templates/ \ No newline at end of file diff --git a/tools/ansible-openstack/playbooks/mysql/main.yml b/tools/ansible-openstack/playbooks/mysql/main.yml new file mode 100644 index 0000000..0deaf86 --- /dev/null +++ b/tools/ansible-openstack/playbooks/mysql/main.yml @@ -0,0 +1,83 @@ +--- +- name: MySQL Server + hosts: sql_backend + gather_facts: yes + sudo: yes + + tasks: + - name: ensure mysql and mysql python client installed + yum: name={{ item }} state=latest + with_items: + - mysql-server + - MySQL-python +# notify: +# - stop mysql + + - name: ensure mysql port is opened. + include: ../reusables/open_firewall_port.yml protocol=tcp port=3306 + + + - name: ensure my.cnf file is configured + copy: > + src=templates/etc/my.cnf + dest=/etc/my.cnf + owner=root + group=root + mode=0664 + notify: + - restart mysql + + - name: ensure mysql runs automatically after boot. 
+ service: name=mysqld state=restarted enabled=yes + + - name: ensure mysql root password for all root accounts are changed (1 of 2) + mysql_user: > + name=root + host={{ item }} + password={{ root_db_password }} + with_items: + - "{{ my_int_ip }}" + - 127.0.0.1 + - "{{ ansible_fqdn }}" + - localhost + + + + + - name: ensure .my.cnf file is configured + template: > + src=templates/root/.my.cnf + dest=/root/.my.cnf + owner=root + mode=0600 + + + - name: reboot mysqld and refresh configuration + service: name=mysqld state=restarted enabled=yes + + + - name: ensure anonymous users are not in the database + mysql_user: > + name='' + host={{ item }} + state=absent + with_items: + - localhost + - "{{ ansible_fqdn }}" + + - name: ensure TEST database is removed + mysql_db: > + name="test" + state=absent + + handlers: + - include: ../reusables/handlers.yml + + - name: stop mysql + service: name=mysqld state=restarted + when: "ansible_hostname != primary_controller_host" + + - name: restart mysql + service: name=mysqld state=restarted +# when: "ansible_hostname == primary_controller_host" + diff --git a/tools/ansible-openstack/playbooks/mysql/templates b/tools/ansible-openstack/playbooks/mysql/templates new file mode 100644 index 0000000..7cb455a --- /dev/null +++ b/tools/ansible-openstack/playbooks/mysql/templates @@ -0,0 +1 @@ +../../templates/ \ No newline at end of file diff --git a/tools/ansible-openstack/playbooks/neutron/compute.yml b/tools/ansible-openstack/playbooks/neutron/compute.yml new file mode 100644 index 0000000..2176db8 --- /dev/null +++ b/tools/ansible-openstack/playbooks/neutron/compute.yml @@ -0,0 +1,75 @@ +--- +- name: set fact for later use + hosts: frontend[0] + sudo: yes + tasks: + # you can't use delegate_to and register at simultaneously + # delegate_to is used for simple "fire and forget" command. + - name: determine tenant id of "service" + shell: > + /usr/bin/keystone + --os-endpoint "{{ keystone_admin_url }}" + --os-token "{{ admin_token }}" + tenant-get {{service_tenant}} | grep "id " | awk {'print $4'} + register: nova_admin_tenant_id + + - name: set_fact nova_admin_tenant_id for later use + action: set_fact + args: + admin_tenantID: "{{ nova_admin_tenant_id.stdout }}" + +- name: OpenStack Networking (Agent) + hosts: compute_backend + sudo: yes + + tasks: + - name: ensure neutron packages are installed + yum: name={{ item }} state=latest + with_items: + - openstack-neutron-ml2 + - openstack-neutron-openvswitch + - openstack-neutron-linuxbridge + - openstack-selinux + notify: + - restart neutron-agents(COMPUTE) + + + - name: ensure neutron configuration files are copied + template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + owner: root + group: neutron + mode: 0640 + with_items: + - { src: templates/etc/neutron/neutron.conf, dest: /etc/neutron/neutron.conf} + - { src: templates/etc/neutron/plugins/ml2/ml2_conf_linuxbridge_VLAN.ini, dest: /etc/neutron/plugins/ml2/ml2_conf.ini} + notify: + - restart neutron-agents(COMPUTE) + + - name: ensure symlink to ml2 plugin is created. 
+ file: src=/etc/neutron/plugins/ml2/ml2_conf.ini + path=/etc/neutron/plugin.ini + state=link + + - name: ensure neutron-linuxbridge-agent reads symlink + copy: + src: templates/etc/init.d/neutron-linuxbridge-agent + dest: /etc/init.d/neutron-linuxbridge-agent + owner: root + group: root + mode: 0755 + + - name: ensure neutron-linuxbridge-agent automatically boots after reboot + service: name=neutron-linuxbridge-agent state=started enabled=yes + + + + handlers: + - name: restart neutron-agents(COMPUTE) + service: name={{ item }} state=restarted + with_items: + - neutron-linuxbridge-agent + + + \ No newline at end of file diff --git a/tools/ansible-openstack/playbooks/neutron/frontend.yml b/tools/ansible-openstack/playbooks/neutron/frontend.yml new file mode 100644 index 0000000..a57af2e --- /dev/null +++ b/tools/ansible-openstack/playbooks/neutron/frontend.yml @@ -0,0 +1,67 @@ +--- +- name: OpenStack Networking (Frontend) + hosts: frontend + sudo: yes + + tasks: + #OVS agent required due to the packaging bug of ml2 plugin. + - name: ensure neutron packages are installed + yum: name={{ item }} state=latest + with_items: + - MySQL-python + - openstack-neutron + - openstack-neutron-ml2 + - openstack-neutron-openvswitch + - openstack-utils + - openstack-selinux + - python-neutron + - python-neutronclient + notify: + - restart neutron-server + + # you can't use delegate_to and register at simultaneously + # delegate_to is used for simple "fire and forget" command. + - name: determine tenant id of "service" + shell: > + /usr/bin/keystone + --os-endpoint "{{ keystone_admin_url }}" + --os-token "{{ admin_token }}" + tenant-get {{service_tenant}} | grep "id " | awk {'print $4'} + register: nova_admin_tenant_id + + - name: set_fact nova_admin_tenant_id for later use + action: set_fact + args: + admin_tenantID: "{{ nova_admin_tenant_id.stdout }}" + + - name: ensure neutron configuration files are copied + template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + owner: root + group: neutron + mode: 0640 + with_items: + - { src: templates/etc/neutron/neutron.conf, dest: /etc/neutron/neutron.conf} + - { src: templates/etc/neutron/plugins/ml2/ml2_conf_linuxbridge_VLAN.ini, dest: /etc/neutron/plugins/ml2/ml2_conf.ini} + notify: + - restart neutron-server + + + - name: ensure symlink to ml2 plugin is created. + file: src=/etc/neutron/plugins/ml2/ml2_conf.ini + path=/etc/neutron/plugin.ini + state=link + + + - name: ensure neutron-server port 9696 is opened + include: ../reusables/open_firewall_port.yml protocol=tcp port=9696 + + - name: ensure neutron-server automatically boots after reboot + service: name=neutron-server state=started enabled=yes + + handlers: + - include: ../reusables/handlers.yml + + - name: restart neutron-server + service: name=neutron-server state=restarted diff --git a/tools/ansible-openstack/playbooks/neutron/gateway.yml b/tools/ansible-openstack/playbooks/neutron/gateway.yml new file mode 100644 index 0000000..fdfdd5c --- /dev/null +++ b/tools/ansible-openstack/playbooks/neutron/gateway.yml @@ -0,0 +1,113 @@ +--- +- name: set fact for later use + hosts: frontend[0] + sudo: yes + tasks: + # you can't use delegate_to and register at simultaneously + # delegate_to is used for simple "fire and forget" command. 
+ - name: determine tenant id of "service" + shell: > + /usr/bin/keystone + --os-endpoint "{{ keystone_admin_url }}" + --os-token "{{ admin_token }}" + tenant-get {{service_tenant}} | grep "id " | awk {'print $4'} + register: nova_admin_tenant_id + + - name: set_fact nova_admin_tenant_id for later use + action: set_fact + args: + admin_tenantID: "{{ nova_admin_tenant_id.stdout }}" + +- name: OpenStack Networking (Network Gateway) + hosts: network_gateway + sudo: yes + tasks: + - name: ensure neutron packages are installed + yum: name={{ item }} state=latest + with_items: + - MySQL-python + - openstack-neutron + - openstack-neutron-ml2 + - openstack-neutron-openvswitch + - openstack-neutron-linuxbridge + - openstack-utils + - openstack-selinux + - python-neutron + - python-neutronclient + - haproxy + notify: + - restart neutron-agents(gateway) + + + - name: ensure neutron configuration files are copied + template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + owner: root + group: neutron + mode: 0640 + with_items: + - { src: templates/etc/neutron/neutron.conf, dest: /etc/neutron/neutron.conf} + - { src: templates/etc/neutron/plugins/ml2/ml2_conf_linuxbridge_VLAN.ini, dest: /etc/neutron/plugins/ml2/ml2_conf.ini} + - { src: templates/etc/neutron/dhcp_agent.ini, dest: /etc/neutron/dhcp_agent.ini} + - { src: templates/etc/neutron/l3_agent.ini, dest: /etc/neutron/l3_agent.ini} + - { src: templates/etc/neutron/lbaas_agent.ini, dest: /etc/neutron/lbaas_agent.ini} + - { src: templates/etc/neutron/metadata_agent.ini, dest: /etc/neutron/metadata_agent.ini } + notify: + - restart neutron-agents(gateway) + + - name: ensure symlink to ml2 plugin is created. + file: src=/etc/neutron/plugins/ml2/ml2_conf.ini + path=/etc/neutron/plugin.ini + state=link + notify: + - restart neutron-agents(gateway) + + + - name: ensure neutron-linuxbridge-agent reads symlink + copy: + src: templates/etc/init.d/neutron-linuxbridge-agent + dest: /etc/init.d/neutron-linuxbridge-agent + owner: root + group: root + mode: 0755 + notify: + - restart neutron-agents(gateway) + + + - name: ensure neutron-linuxbridge-agent automatically boots after reboot + service: name=neutron-linuxbridge-agent state=started enabled=yes + + - name: ensure neutron-dhcp-agent automatically boots after reboot + service: name=neutron-dhcp-agent state=started enabled=yes + + - name: ensure L3-agent automatically boots after reboot + service: name=neutron-l3-agent state=started enabled=yes + + - name: ensure neutron-metadata-agent automatically boots after reboot + service: name=neutron-metadata-agent state=started enabled=yes + + - name: ensure neutron-lbaas-agent automatically boots after reboot + service: name=neutron-lbaas-agent state=started enabled=yes + + + - name: (WORKAROUND) comment out FORWARD REJECT rule from iptables to enable instace DHCP + include: ../reusables/delete_firewall_rule.yml delete_line='-A FORWARD -j REJECT --reject-with icmp-host-prohibited' + + + + + + + handlers: + - name: restart neutron-agents(gateway) + service: name={{ item }} state=restarted + with_items: + - neutron-dhcp-agent + - neutron-l3-agent + - neutron-lbaas-agent + - neutron-linuxbridge-agent + - neutron-metadata-agent + + - include: ../reusables/handlers.yml + diff --git a/tools/ansible-openstack/playbooks/neutron/keystone.yml b/tools/ansible-openstack/playbooks/neutron/keystone.yml new file mode 100644 index 0000000..66c739a --- /dev/null +++ b/tools/ansible-openstack/playbooks/neutron/keystone.yml @@ -0,0 +1,35 @@ +--- +- name: Configure neutron-api in 
keystone + hosts: frontend[0] + sudo: False + gather_facts: True + tasks: + + - name: create neutron service user + keystone_user: + endpoint: "{{ keystone_admin_url }}" + token: "{{ admin_token }}" + tenant: "{{ service_tenant }}" + user: neutron + password: "{{ neutron_identity_password }}" + + - name: add neutron service user to the service tenant with the admin role + keystone_user: + endpoint: "{{ keystone_admin_url }}" + token: "{{ admin_token }}" + tenant: "{{ service_tenant }}" + user: neutron + role: admin + + - name: add neutron endpoint to keystone + keystone_service: + endpoint: "{{ keystone_admin_url }}" + token: "{{ admin_token }}" + region: "{{ openstack_region }}" + name: neutron + type: network + description: "Networking Service" + public_url: "{{ neutron_public_url }}" + internal_url: "{{ neutron_internal_url }}" + admin_url: "{{ neutron_admin_url }}" + diff --git a/tools/ansible-openstack/playbooks/neutron/library b/tools/ansible-openstack/playbooks/neutron/library new file mode 100644 index 0000000..ad514cb --- /dev/null +++ b/tools/ansible-openstack/playbooks/neutron/library @@ -0,0 +1 @@ +../../openstack-ansible-modules/ \ No newline at end of file diff --git a/tools/ansible-openstack/playbooks/neutron/main.yml b/tools/ansible-openstack/playbooks/neutron/main.yml new file mode 100644 index 0000000..8aafa7a --- /dev/null +++ b/tools/ansible-openstack/playbooks/neutron/main.yml @@ -0,0 +1,8 @@ +--- +- include: mysql.yml +- include: keystone.yml +- include: frontend.yml +- include: gateway.yml +- include: compute.yml + + diff --git a/tools/ansible-openstack/playbooks/neutron/mysql.yml b/tools/ansible-openstack/playbooks/neutron/mysql.yml new file mode 100644 index 0000000..fc32123 --- /dev/null +++ b/tools/ansible-openstack/playbooks/neutron/mysql.yml @@ -0,0 +1,21 @@ +--- +- name: Install or verify neutron database + hosts: sql_backend[0] + sudo: True + tasks: + + - name: ensure neutron database is present + mysql_db: + name: neutron + encoding: utf8 + + - name: ensure neutron database user is present + mysql_user: + name: neutron + host: "{{ item }}" + password: "{{ neutron_db_password }}" + priv: neutron.*:ALL + with_items: + - "%" + - localhost + diff --git a/tools/ansible-openstack/playbooks/neutron/templates b/tools/ansible-openstack/playbooks/neutron/templates new file mode 100644 index 0000000..7cb455a --- /dev/null +++ b/tools/ansible-openstack/playbooks/neutron/templates @@ -0,0 +1 @@ +../../templates/ \ No newline at end of file diff --git a/tools/ansible-openstack/playbooks/nova/compute.yml b/tools/ansible-openstack/playbooks/nova/compute.yml new file mode 100644 index 0000000..1be1c9b --- /dev/null +++ b/tools/ansible-openstack/playbooks/nova/compute.yml @@ -0,0 +1,59 @@ +--- +- name: OpenStack Compute (Backend) + hosts: compute_backend + sudo: yes + + tasks: + - name: ensure nova compute packages are installed + yum: name={{item}} state=latest + with_items: + - openstack-nova-compute + notify: + - ensure services are restarted + + + - name: ensure nova.conf file is configured + template: > + src=templates/etc/nova/nova.conf + dest=/etc/nova/nova.conf + owner=root group=nova mode=0640 backup=no + notify: + - ensure services are restarted + + - name: ensure vncserver_proxy address is set to self address + lineinfile: + dest: /etc/nova/nova.conf + regexp: "vncserver_proxyclient_address=" + line: "vncserver_proxyclient_address={{ my_int_ip }}" + state: present + + - name: ensure vncserver_listen address is set to self address + lineinfile: + dest: 
/etc/nova/nova.conf + regexp: "vncserver_listen=" + line: "vncserver_listen={{ my_int_ip }}" + state: present + + + + - name: ensure virbr0 is absent + shell: virsh net-destroy default && virsh net-undefine default + when: "'virbr0' in ansible_interfaces" + ignore_errors: True + notify: restart libvirtd + + - name: ensure VNC ports 5900:5999 are opened + include: ../reusables/open_firewall_port.yml protocol=tcp port="5900:5999" + + - name: ensure nova-compute automatically boots after reboot + service: name=openstack-nova-compute state=started enabled=yes + + handlers: + - include: ../reusables/handlers.yml + + - name: restart libvirtd + service: name=libvirtd state=restarted + + - name: ensure services are restarted + service: name=openstack-nova-compute state=restarted + diff --git a/tools/ansible-openstack/playbooks/nova/controller.yml b/tools/ansible-openstack/playbooks/nova/controller.yml new file mode 100644 index 0000000..8fcfd3a --- /dev/null +++ b/tools/ansible-openstack/playbooks/nova/controller.yml @@ -0,0 +1,60 @@ +--- +- name: OpenStack Compute (Controller) + hosts: controller + sudo: yes + + tasks: + - name: ensure nova packages are installed + yum: name={{item}} state=latest + with_items: + - openstack-nova-novncproxy + - openstack-nova-console + - openstack-nova-conductor + - openstack-nova-scheduler + - python-novaclient + - openstack-nova-cert + notify: + - ensure services are restarted + + - name: ensure nova.conf file is configured + template: > + src=templates/etc/nova/nova.conf + dest=/etc/nova/nova.conf + owner=root group=nova mode=0640 backup=no + notify: + - ensure services are restarted + + + + - name: ensure nova port 6080 is opened + include: ../reusables/open_firewall_port.yml protocol=tcp port=6080 + + - name: ensure nova port 6081 is opened + include: ../reusables/open_firewall_port.yml protocol=tcp port=6081 + + - name: ensure nova services automatically boot after reboot + service: name={{item}} state=started enabled=yes + with_items: + - openstack-nova-consoleauth + - openstack-nova-console + - openstack-nova-novncproxy + - openstack-nova-xvpvncproxy + - openstack-nova-scheduler + - openstack-nova-cert + - openstack-nova-conductor + - messagebus + + + handlers: + - include: ../reusables/handlers.yml + + - name: ensure services are restarted + service: name={{item}} state=restarted + with_items: + - openstack-nova-consoleauth + - openstack-nova-console + - openstack-nova-novncproxy + - openstack-nova-xvpvncproxy + - openstack-nova-scheduler + - openstack-nova-cert + - openstack-nova-conductor diff --git a/tools/ansible-openstack/playbooks/nova/frontend.yml b/tools/ansible-openstack/playbooks/nova/frontend.yml new file mode 100644 index 0000000..7a308f0 --- /dev/null +++ b/tools/ansible-openstack/playbooks/nova/frontend.yml @@ -0,0 +1,59 @@ +--- +- name: OpenStack Compute (Frontend) + hosts: frontend + sudo: yes + + tasks: + - name: ensure nova packages are installed + yum: name={{item}} state=latest + with_items: + - openstack-nova-api + notify: + - ensure services are restarted + + + - name: ensure nova.conf file is configured + template: > + src=templates/etc/nova/nova.conf + dest=/etc/nova/nova.conf + owner=root group=nova mode=0640 backup=no + notify: + - ensure services are restarted + + - name: ensure nova port 8773 is opened + include: ../reusables/open_firewall_port.yml protocol=tcp port=8773 + + - name: ensure nova port 8774 is opened + include: ../reusables/open_firewall_port.yml protocol=tcp port=8774 + + - name: ensure nova port 8775 
is opened + include: ../reusables/open_firewall_port.yml protocol=tcp port=8775 + + + - name: ensure database is synchronized + command: su nova -s /bin/sh -c "nova-manage db sync" + notify: + - ensure services are restarted + + + - name: ensure nova services automatically boot after reboot + service: name={{item}} state=started enabled=yes + with_items: + - openstack-nova-api + - messagebus + + + + handlers: + - include: ../reusables/handlers.yml + + - name: ensure services are restarted + service: name={{item}} state=restarted + with_items: + - openstack-nova-api + - messagebus + + + + + \ No newline at end of file diff --git a/tools/ansible-openstack/playbooks/nova/keystone.yml b/tools/ansible-openstack/playbooks/nova/keystone.yml new file mode 100644 index 0000000..8fe9b8b --- /dev/null +++ b/tools/ansible-openstack/playbooks/nova/keystone.yml @@ -0,0 +1,51 @@ +--- +- name: Configure nova-api in keystone + hosts: frontend[0] + sudo: False + gather_facts: True + tasks: + + - name: create nova service user + keystone_user: + endpoint: "{{ keystone_admin_url }}" + token: "{{ admin_token }}" + tenant: "{{ service_tenant }}" + user: nova + password: "{{ nova_identity_password }}" + + - name: add nova service user to the service tenant with the admin role + keystone_user: + endpoint: "{{ keystone_admin_url }}" + token: "{{ admin_token }}" + tenant: "{{ service_tenant }}" + user: nova + role: admin + + - name: add nova endpoint to keystone + keystone_service: + endpoint: "{{ keystone_admin_url }}" + token: "{{ admin_token }}" + region: "{{ openstack_region }}" + name: nova + type: compute + description: "Compute Service" + public_url: "{{ nova_public_url }}" + internal_url: "{{ nova_internal_url }}" + admin_url: "{{ nova_admin_url }}" + + - name: add ec2 compatibility layer endpoint + keystone_service: + endpoint: "{{ keystone_admin_url }}" + token: "{{ admin_token }}" + region: "{{ openstack_region }}" + name: ec2 + type: ec2 + description: "EC2 Compatibility Layer" + public_url: "{{ ec2_public_url }}" + internal_url: "{{ ec2_internal_url }}" + admin_url: "{{ ec2_admin_url }}" + + + + + \ No newline at end of file diff --git a/tools/ansible-openstack/playbooks/nova/library b/tools/ansible-openstack/playbooks/nova/library new file mode 100644 index 0000000..ad514cb --- /dev/null +++ b/tools/ansible-openstack/playbooks/nova/library @@ -0,0 +1 @@ +../../openstack-ansible-modules/ \ No newline at end of file diff --git a/tools/ansible-openstack/playbooks/nova/main.yml b/tools/ansible-openstack/playbooks/nova/main.yml new file mode 100644 index 0000000..1353b09 --- /dev/null +++ b/tools/ansible-openstack/playbooks/nova/main.yml @@ -0,0 +1,5 @@ +- include: mysql.yml +- include: keystone.yml +- include: frontend.yml +- include: controller.yml +- include: compute.yml diff --git a/tools/ansible-openstack/playbooks/nova/mysql.yml b/tools/ansible-openstack/playbooks/nova/mysql.yml new file mode 100644 index 0000000..8e15d68 --- /dev/null +++ b/tools/ansible-openstack/playbooks/nova/mysql.yml @@ -0,0 +1,21 @@ +--- +- name: Install or verify nova database + hosts: sql_backend[0] + sudo: True + tasks: + + - name: ensure nova database is present + mysql_db: + name: nova + encoding: utf8 + + - name: ensure nova database user is present + mysql_user: + name: nova + host: "{{ item }}" + password: "{{ nova_db_password }}" + priv: nova.*:ALL + with_items: + - "%" + - localhost + diff --git a/tools/ansible-openstack/playbooks/nova/templates b/tools/ansible-openstack/playbooks/nova/templates new file mode 100644 
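The nova compute, controller, and frontend plays above all follow the same restart-on-change convention: configuration tasks notify a shared "ensure services are restarted" handler, and that handler only runs at the end of the play if at least one notifying task actually reported a change. A minimal sketch of the pattern follows; the play name, the example template path, and the choice of openstack-nova-api as the restarted service are illustrative only and not part of this patch.

---
# Minimal sketch (hypothetical template path) of the notify/handler
# convention used by the nova plays above.
- name: restart-on-change example
  hosts: frontend
  sudo: yes

  tasks:
    - name: ensure the example config file is up to date
      template: src=templates/etc/example.conf dest=/etc/example.conf owner=root mode=0644
      notify:
        - ensure services are restarted

  handlers:
    - name: ensure services are restarted
      service: name=openstack-nova-api state=restarted

If the template task changes nothing on a given run, the handler is skipped and the service is left alone, which is what makes these plays safe to re-run.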
index 0000000..7cb455a --- /dev/null +++ b/tools/ansible-openstack/playbooks/nova/templates @@ -0,0 +1 @@ +../../templates/ \ No newline at end of file diff --git a/tools/ansible-openstack/playbooks/ntp/main.yml b/tools/ansible-openstack/playbooks/ntp/main.yml new file mode 100644 index 0000000..c478e96 --- /dev/null +++ b/tools/ansible-openstack/playbooks/ntp/main.yml @@ -0,0 +1,34 @@ +--- +- name: NTP Server + hosts: all + sudo: yes + + tasks: + - name: ensure ntp package is installed + yum: name=ntp state=latest + when: "ntp_server is defined" + + - name: ensure ntp.conf file is configured + template: > + src=templates/etc/ntp.conf + dest=/etc/ntp.conf + owner=root + group=root + mode=0644 + notify: restart ntp + when: "ntp_server is defined" + + - name: synchronize clock now. + command: /usr/sbin/ntpdate -bu {{ ntp_server }} + when: "ntp_server is defined" + + + handlers: + - name: restart ntp + service: name=ntpd state=restarted enabled=on + + + + + + diff --git a/tools/ansible-openstack/playbooks/ntp/templates b/tools/ansible-openstack/playbooks/ntp/templates new file mode 100644 index 0000000..7cb455a --- /dev/null +++ b/tools/ansible-openstack/playbooks/ntp/templates @@ -0,0 +1 @@ +../../templates/ \ No newline at end of file diff --git a/tools/ansible-openstack/playbooks/post_action/compute.yml b/tools/ansible-openstack/playbooks/post_action/compute.yml new file mode 100644 index 0000000..b9252d2 --- /dev/null +++ b/tools/ansible-openstack/playbooks/post_action/compute.yml @@ -0,0 +1,16 @@ +--- +- name: restart services(COMPUTE) + hosts: compute_backend + serial: 1 + sudo: yes + gather_facts: no + + tasks: + - name: ensure COMPUTE services are restarted + service: name={{ item }} state=restarted + with_items: + - neutron-linuxbridge-agent + - libvirtd + - openstack-nova-compute + + diff --git a/tools/ansible-openstack/playbooks/post_action/controller.yml b/tools/ansible-openstack/playbooks/post_action/controller.yml new file mode 100644 index 0000000..b897f49 --- /dev/null +++ b/tools/ansible-openstack/playbooks/post_action/controller.yml @@ -0,0 +1,25 @@ +--- +- name: restart services(controller) + hosts: controller + serial: 1 + sudo: yes + gather_facts: no + + tasks: + - name: ensure CONTROLLER services are restarted + service: name={{item}} state=restarted + with_items: + - openstack-cinder-scheduler + - openstack-glance-registry + - openstack-nova-cert + - openstack-nova-conductor + - openstack-nova-console + - openstack-nova-consoleauth + - openstack-nova-novncproxy + - openstack-nova-scheduler + - openstack-nova-xvpvncproxy + + + + + diff --git a/tools/ansible-openstack/playbooks/post_action/frontend.yml b/tools/ansible-openstack/playbooks/post_action/frontend.yml new file mode 100644 index 0000000..b7e3774 --- /dev/null +++ b/tools/ansible-openstack/playbooks/post_action/frontend.yml @@ -0,0 +1,20 @@ +--- +- name: restart services(frontend) + hosts: frontend + serial: 1 + sudo: yes + gather_facts: no + + tasks: + - name: ensure FRONTEND services are restarted + service: name={{item}} state=restarted + with_items: + - openstack-keystone + - openstack-glance-api + - openstack-cinder-api + - neutron-server + - openstack-nova-api + + + + diff --git a/tools/ansible-openstack/playbooks/post_action/gateway.yml b/tools/ansible-openstack/playbooks/post_action/gateway.yml new file mode 100644 index 0000000..0ad20ee --- /dev/null +++ b/tools/ansible-openstack/playbooks/post_action/gateway.yml @@ -0,0 +1,20 @@ +--- +- name: restart services(NW_Gateway) + hosts: network_gateway + 
serial: 1 + sudo: yes + gather_facts: no + + tasks: + - name: ensure GATEWAY services are restarted + service: name={{ item }} state=restarted + with_items: + - neutron-dhcp-agent + - neutron-linuxbridge-agent + - neutron-l3-agent + - neutron-metadata-agent + - neutron-lbaas-agent + + + + diff --git a/tools/ansible-openstack/playbooks/post_action/library b/tools/ansible-openstack/playbooks/post_action/library new file mode 100644 index 0000000..ad514cb --- /dev/null +++ b/tools/ansible-openstack/playbooks/post_action/library @@ -0,0 +1 @@ +../../openstack-ansible-modules/ \ No newline at end of file diff --git a/tools/ansible-openstack/playbooks/post_action/main.yml b/tools/ansible-openstack/playbooks/post_action/main.yml new file mode 100644 index 0000000..28edcaa --- /dev/null +++ b/tools/ansible-openstack/playbooks/post_action/main.yml @@ -0,0 +1,10 @@ +- include: rabbitmq.yml +- include: compute.yml +- include: gateway.yml +- include: controller.yml +- include: frontend.yml +- include: post-prepareProviderNW.yml + +#- include: post-prepareTenant-keystone.yml +#- include: post-prepareTenant-NW.yml + diff --git a/tools/ansible-openstack/playbooks/post_action/post-prepareProviderNW.yml b/tools/ansible-openstack/playbooks/post_action/post-prepareProviderNW.yml new file mode 100644 index 0000000..4ad437d --- /dev/null +++ b/tools/ansible-openstack/playbooks/post_action/post-prepareProviderNW.yml @@ -0,0 +1,40 @@ +--- +- name: prepare provider Network + hosts: frontend[0] + sudo: yes + + tasks: + - name: ensure provider network exists + neutron_network: > + name={{ provider_nw_name }} + tenant_name={{ service_tenant }} + provider_network_type={{ provider_network_type }} + provider_physical_network=phys_external + router_external=yes + state=present + region_name={{ openstack_region }} + auth_url={{ keystone_admin_url }} + login_username={{ admin_user }} + login_password={{ admin_password }} + login_tenant_name={{ admin_tenant }} + + - name: ensure provider subnetwork exists + neutron_subnet: > + name={{ provider_subnet_name }} + network_name={{ provider_nw_name }} + cidr={{ provider_cidr }} + tenant_name={{ service_tenant }} + ip_version=4 + enable_dhcp={{ provider_enable_dhcp }} + gateway_ip={{ provider_gateway_ip }} + allocation_pool_start={{ provider_allocation_pool_start }} + allocation_pool_end={{ provider_allocation_pool_end }} + state=present + region_name={{ openstack_region }} + auth_url={{ keystone_admin_url }} + login_username={{ admin_user }} + login_password={{ admin_password }} + login_tenant_name={{ admin_tenant }} + + + \ No newline at end of file diff --git a/tools/ansible-openstack/playbooks/post_action/rabbitmq.yml b/tools/ansible-openstack/playbooks/post_action/rabbitmq.yml new file mode 100644 index 0000000..21c6515 --- /dev/null +++ b/tools/ansible-openstack/playbooks/post_action/rabbitmq.yml @@ -0,0 +1,15 @@ +--- +- name: Restart RabbitMQ server + hosts: amqp_backend + serial: 1 + sudo: yes + gather_facts: no + tasks: + + - name: ensure RabbitMQ service is restarted + service: name={{ item }} state=restarted + with_items: + - rabbitmq-server + + + \ No newline at end of file diff --git a/tools/ansible-openstack/playbooks/pre_action/main.yml b/tools/ansible-openstack/playbooks/pre_action/main.yml new file mode 100644 index 0000000..ae71593 --- /dev/null +++ b/tools/ansible-openstack/playbooks/pre_action/main.yml @@ -0,0 +1,3 @@ +--- +- include: setup.yml + diff --git a/tools/ansible-openstack/playbooks/pre_action/setup.yml 
b/tools/ansible-openstack/playbooks/pre_action/setup.yml new file mode 100644 index 0000000..ed1b2c5 --- /dev/null +++ b/tools/ansible-openstack/playbooks/pre_action/setup.yml @@ -0,0 +1,46 @@ +--- +- name: Host Variables + hosts: all + gather_facts: yes + sudo: yes + + tasks: + + - name: ensure my_int_ip is a fact variable + action: set_fact + args: + my_int_ip: "{{ my_int_ip }}" + + + - name: ensure my_ext_ip is a fact variable + action: set_fact + args: + my_ext_ip: "{{ my_ext_ip }}" + when: my_ext_obj['ipv4']['address'] is defined + + # When a linuxbridge is built for a flat network, it enslaves the ethernet interface + # and the interface's IP address is reattached to the bridge. + # The following two tasks determine the IP address attached to the bridge and record it with set_fact for later use. + - name: determine the IP address of the linuxbridge-linked interface + shell: /sbin/bridge link show | grep {{ my_ext_if }} | awk '{print $10}' | sed -e "s/-/_/" + register: linked_bridge + when: '"ipv4" not in my_ext_obj' + + - name: ensure my_ext_ip is a fact variable for the linuxbridge-linked interface + action: set_fact + args: + my_ext_ip: "{{ hostvars[inventory_hostname]['ansible_' + linked_bridge.stdout]['ipv4']['address'] }}" + when: '"ipv4" not in my_ext_obj' + + + - name: ensure my_mng_ip is a fact variable + action: set_fact + args: + my_mng_ip: "{{ my_mng_ip }}" + + + - name: ensure my_ext_ip is a fact variable (br-ex) + action: set_fact + args: + my_ext_ip: "{{ ansible_br_ex['ipv4']['address'] }}" + when: "ansible_br_ex is defined" diff --git a/tools/ansible-openstack/playbooks/rabbitmq/main.yml b/tools/ansible-openstack/playbooks/rabbitmq/main.yml new file mode 100644 index 0000000..ede5898 --- /dev/null +++ b/tools/ansible-openstack/playbooks/rabbitmq/main.yml @@ -0,0 +1,71 @@ +--- +- name: RabbitMQ Server + hosts: amqp_backend + gather_facts: yes + sudo: yes + + tasks: + + - name: ensure rabbitmq-server packages are installed + yum: name={{ item }} state=latest + with_items: + - rabbitmq-server + - erlang + + - name: ensure port 5672 is opened + include: ../reusables/open_firewall_port.yml protocol=tcp port=5672 + - name: ensure port 4369 is opened + include: ../reusables/open_firewall_port.yml protocol=tcp port=4369 + - name: ensure port $amqp_erlang_port is opened + include: ../reusables/open_firewall_port.yml protocol=tcp port={{ amqp_erlang_port }} + + + - name: ensure erlang port is configured for rabbitmq (and cluster) + template: > + src=templates/etc/rabbitmq/rabbitmq.config + dest=/etc/rabbitmq/rabbitmq.config + owner=root + group=root + mode=0644 + notify: + - restart rabbitmq-server + + + - name: ensure erlang kernel parameter is set + template: > + src=templates/etc/rabbitmq/rabbitmq-env.conf + dest=/etc/rabbitmq/rabbitmq-env.conf + owner=root + group=root + mode=0644 + notify: + - restart rabbitmq-server + + + - name: ensure rabbitmq runs automatically after boot. 
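+    # 'state=started enabled=yes' starts rabbitmq-server immediately and registers it with
+    # chkconfig so it also comes up after a reboot; the two template tasks above notify the
+    # 'restart rabbitmq-server' handler defined below, so a configuration change triggers a
+    # restart at the end of the play.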
+ service: name=rabbitmq-server state=started enabled=yes + + - name: ensure only rabbitmq user is admin + rabbitmq_user: user={{ amqp_user }} + password={{ amqp_pass }} + vhost=/ + configure_priv=.* + read_priv=.* + write_priv=.* + state=present + tags=administrator + + - name: ensure guest user is removed from rabbitmq + rabbitmq_user: user=guest + vhost=/ + state=absent + + + handlers: + - include: ../reusables/handlers.yml + - name: restart rabbitmq-server + service: name=rabbitmq-server state=restarted + + + + diff --git a/tools/ansible-openstack/playbooks/rabbitmq/templates b/tools/ansible-openstack/playbooks/rabbitmq/templates new file mode 100644 index 0000000..7cb455a --- /dev/null +++ b/tools/ansible-openstack/playbooks/rabbitmq/templates @@ -0,0 +1 @@ +../../templates/ \ No newline at end of file diff --git a/tools/ansible-openstack/playbooks/reusables/delete_firewall_rule.yml b/tools/ansible-openstack/playbooks/reusables/delete_firewall_rule.yml new file mode 100644 index 0000000..e33df4c --- /dev/null +++ b/tools/ansible-openstack/playbooks/reusables/delete_firewall_rule.yml @@ -0,0 +1,6 @@ +--- +# delete iptables rule from /etc/sysconfig/iptables. Don't forget to include handler.yml at caller script. + - name: ensure iptables rule is deleted. + lineinfile: "dest=/etc/sysconfig/iptables regexp=^'{{ delete_line }}' line='{{ '#' + delete_line }}'" + notify: restart iptables + diff --git a/tools/ansible-openstack/playbooks/reusables/handlers.yml b/tools/ansible-openstack/playbooks/reusables/handlers.yml new file mode 100644 index 0000000..e508c7f --- /dev/null +++ b/tools/ansible-openstack/playbooks/reusables/handlers.yml @@ -0,0 +1,7 @@ +--- +# handlers here + + - name: restart iptables + service: name=iptables state=restarted + + diff --git a/tools/ansible-openstack/playbooks/reusables/open_firewall_port.yml b/tools/ansible-openstack/playbooks/reusables/open_firewall_port.yml new file mode 100644 index 0000000..6fe7fe8 --- /dev/null +++ b/tools/ansible-openstack/playbooks/reusables/open_firewall_port.yml @@ -0,0 +1,9 @@ +--- +# open port for iptables. Don't forget to include handler.yml at caller script. + - name: ensure port is opened. + lineinfile: dest=/etc/sysconfig/iptables + line="-A INPUT -p {{ protocol }} --dport {{ port }} -m state --state NEW -j ACCEPT" + insertafter="-A INPUT -i lo -j ACCEPT" + state=present + notify: restart iptables + diff --git a/tools/ansible-openstack/set_openstack.yml b/tools/ansible-openstack/set_openstack.yml new file mode 100644 index 0000000..58340c0 --- /dev/null +++ b/tools/ansible-openstack/set_openstack.yml @@ -0,0 +1,13 @@ +- include: playbooks/pre_action/main.yml +- include: playbooks/common/main.yml +- include: playbooks/ntp/main.yml +- include: playbooks/rabbitmq/main.yml +- include: playbooks/mysql/main.yml +- include: playbooks/memcached/main.yml +- include: playbooks/keystone/main.yml +- include: playbooks/glance/main.yml +- include: playbooks/cinder/main.yml +- include: playbooks/neutron/main.yml +- include: playbooks/nova/main.yml +- include: playbooks/horizon/main.yml +- include: playbooks/post_action/main.yml \ No newline at end of file diff --git a/tools/ansible-openstack/templates/etc/cinder/cinder.conf b/tools/ansible-openstack/templates/etc/cinder/cinder.conf new file mode 100644 index 0000000..4ae678a --- /dev/null +++ b/tools/ansible-openstack/templates/etc/cinder/cinder.conf @@ -0,0 +1,2163 @@ +[DEFAULT] + +# +# Options defined in oslo.messaging +# + +# Use durable queues in amqp. 
(boolean value) +# Deprecated group/name - [DEFAULT]/rabbit_durable_queues +#amqp_durable_queues=false + +# Auto-delete queues in amqp. (boolean value) +#amqp_auto_delete=false + +# Size of RPC connection pool. (integer value) +#rpc_conn_pool_size=30 + +# Modules of exceptions that are permitted to be recreated +# upon receiving exception data from an rpc call. (list value) +#allowed_rpc_exception_modules=oslo.messaging.exceptions,nova.exception,cinder.exception,exceptions + +# Qpid broker hostname. (string value) +#qpid_hostname=localhost + +# Qpid broker port. (integer value) +#qpid_port=5672 + +# Qpid HA cluster host:port pairs. (list value) +#qpid_hosts=$qpid_hostname:$qpid_port + +# Username for Qpid connection. (string value) +#qpid_username= + +# Password for Qpid connection. (string value) +#qpid_password= + +# Space separated list of SASL mechanisms to use for auth. +# (string value) +#qpid_sasl_mechanisms= + +# Seconds between connection keepalive heartbeats. (integer +# value) +#qpid_heartbeat=60 + +# Transport to use, either 'tcp' or 'ssl'. (string value) +#qpid_protocol=tcp + +# Whether to disable the Nagle algorithm. (boolean value) +#qpid_tcp_nodelay=true + +# The qpid topology version to use. Version 1 is what was +# originally used by impl_qpid. Version 2 includes some +# backwards-incompatible changes that allow broker federation +# to work. Users should update to version 2 when they are +# able to take everything down, as it requires a clean break. +# (integer value) +#qpid_topology_version=1 + +# SSL version to use (valid only if SSL enabled). valid values +# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some +# distributions. (string value) +#kombu_ssl_version= + +# SSL key file (valid only if SSL enabled). (string value) +#kombu_ssl_keyfile= + +# SSL cert file (valid only if SSL enabled). (string value) +#kombu_ssl_certfile= + +# SSL certification authority file (valid only if SSL +# enabled). (string value) +#kombu_ssl_ca_certs= + +# How long to wait before reconnecting in response to an AMQP +# consumer cancel notification. (floating point value) +#kombu_reconnect_delay=1.0 + +# The RabbitMQ broker address where a single node is used. +# (string value) +#rabbit_host=localhost + +# The RabbitMQ broker port where a single node is used. +# (integer value) +#rabbit_port=5672 + +# RabbitMQ HA cluster host:port pairs. (list value) +rabbit_hosts={{ amqp_host }}:5672 + +# Connect over SSL for RabbitMQ. (boolean value) +#rabbit_use_ssl=false + +# The RabbitMQ userid. (string value) +#rabbit_userid=guest +rabbit_userid={{ amqp_user }} + +# The RabbitMQ password. (string value) +#rabbit_password=guest +rabbit_password={{ amqp_pass }} + +# the RabbitMQ login method (string value) +#rabbit_login_method=AMQPLAIN + +# The RabbitMQ virtual host. (string value) +#rabbit_virtual_host=/ + +# How frequently to retry connecting with RabbitMQ. (integer +# value) +#rabbit_retry_interval=1 + +# How long to backoff for between retries when connecting to +# RabbitMQ. (integer value) +#rabbit_retry_backoff=2 + +# Maximum number of RabbitMQ connection retries. Default is 0 +# (infinite retry count). (integer value) +#rabbit_max_retries=0 + +# Use HA queues in RabbitMQ (x-ha-policy: all). If you change +# this option, you must wipe the RabbitMQ database. (boolean +# value) +#rabbit_ha_queues=false + +# If passed, use a fake RabbitMQ provider. (boolean value) +#fake_rabbit=false + +# ZeroMQ bind address. Should be a wildcard (*), an ethernet +# interface, or IP. 
The "host" option should point or resolve +# to this address. (string value) +#rpc_zmq_bind_address=* + +# MatchMaker driver. (string value) +#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost + +# ZeroMQ receiver listening port. (integer value) +#rpc_zmq_port=9501 + +# Number of ZeroMQ contexts, defaults to 1. (integer value) +#rpc_zmq_contexts=1 + +# Maximum number of ingress messages to locally buffer per +# topic. Default is unlimited. (integer value) +#rpc_zmq_topic_backlog= + +# Directory for holding IPC sockets. (string value) +#rpc_zmq_ipc_dir=/var/run/openstack + +# Name of this node. Must be a valid hostname, FQDN, or IP +# address. Must match "host" option, if running Nova. (string +# value) +#rpc_zmq_host=cinder + +# Seconds to wait before a cast expires (TTL). Only supported +# by impl_zmq. (integer value) +#rpc_cast_timeout=30 + +# Heartbeat frequency. (integer value) +#matchmaker_heartbeat_freq=300 + +# Heartbeat time-to-live. (integer value) +#matchmaker_heartbeat_ttl=600 + +# Host to locate redis. (string value) +#host=127.0.0.1 + +# Use this port to connect to redis host. (integer value) +#port=6379 + +# Password for Redis server (optional). (string value) +#password= + +# Size of RPC greenthread pool. (integer value) +#rpc_thread_pool_size=64 + +# Driver or drivers to handle sending notifications. (multi +# valued) +notification_driver=cinder.openstack.common.notifier.rpc_notifier + +# AMQP topic used for OpenStack notifications. (list value) +# Deprecated group/name - [rpc_notifier2]/topics +#notification_topics=notifications + +# Seconds to wait for a response from a call. (integer value) +#rpc_response_timeout=60 + +# A URL representing the messaging driver to use and its full +# configuration. If not set, we fall back to the rpc_backend +# option and driver specific configuration. (string value) +#transport_url= + +# The messaging driver to use, defaults to rabbit. Other +# drivers include qpid and zmq. (string value) +rpc_backend=rabbit + +# The default exchange under which topics are scoped. May be +# overridden by an exchange name specified in the +# transport_url option. 
(string value) +#control_exchange=openstack + + +# +# Options defined in cinder.exception +# + +# make exception message format errors fatal (boolean value) +#fatal_exception_format_errors=false + + +# +# Options defined in cinder.policy +# + +# JSON file representing policy (string value) +#policy_file=policy.json + +# Rule checked when requested rule is not found (string value) +#policy_default_rule=default + + +# +# Options defined in cinder.quota +# + +# number of volumes allowed per project (integer value) +#quota_volumes=10 + +# number of volume snapshots allowed per project (integer +# value) +#quota_snapshots=10 + +# number of volume gigabytes (snapshots are also included) +# allowed per project (integer value) +#quota_gigabytes=1000 + +# number of seconds until a reservation expires (integer +# value) +#reservation_expire=86400 + +# count of reservations until usage is refreshed (integer +# value) +#until_refresh=0 + +# number of seconds between subsequent usage refreshes +# (integer value) +#max_age=0 + +# default driver to use for quota checks (string value) +#quota_driver=cinder.quota.DbQuotaDriver + +# whether to use default quota class for default quota +# (boolean value) +#use_default_quota_class=true + + +# +# Options defined in cinder.service +# + +# seconds between nodes reporting state to datastore (integer +# value) +#report_interval=10 + +# seconds between running periodic tasks (integer value) +#periodic_interval=60 + +# range of seconds to randomly delay when starting the +# periodic task scheduler to reduce stampeding. (Disable by +# setting to 0) (integer value) +#periodic_fuzzy_delay=60 + +# IP address for OpenStack Volume API to listen (string value) +#osapi_volume_listen=0.0.0.0 + +# port for os volume api to listen (integer value) +#osapi_volume_listen_port=8776 + +# Number of workers for OpenStack Volume API service (integer +# value) +osapi_volume_workers=1 + + +# +# Options defined in cinder.test +# + +# File name of clean sqlite db (string value) +#sqlite_clean_db=clean.sqlite + + +# +# Options defined in cinder.wsgi +# + +# Maximum line size of message headers to be accepted. +# max_header_line may need to be increased when using large +# tokens (typically those generated by the Keystone v3 API +# with big service catalogs). (integer value) +#max_header_line=16384 + +# Sets the value of TCP_KEEPIDLE in seconds for each server +# socket. Not supported on OS X. (integer value) +#tcp_keepidle=600 + +# CA certificate file to use to verify connecting clients +# (string value) +#ssl_ca_file= + +# Certificate file to use when starting the server securely +# (string value) +#ssl_cert_file= + +# Private key file to use when starting the server securely +# (string value) +#ssl_key_file= + + +# +# Options defined in cinder.api.common +# + +# the maximum number of items returned in a single response +# from a collection resource (integer value) +#osapi_max_limit=1000 + +# Base URL that will be presented to users in links to the +# OpenStack Volume API (string value) +# Deprecated group/name - [DEFAULT]/osapi_compute_link_prefix +#osapi_volume_base_URL= + + +# +# Options defined in cinder.api.middleware.auth +# + +# Treat X-Forwarded-For as the canonical remote address. Only +# enable this if you have a sanitizing proxy. 
(boolean value) +#use_forwarded_for=false + + +# +# Options defined in cinder.api.middleware.sizelimit +# + +# Max size for body of a request (integer value) +#osapi_max_request_body_size=114688 + + +# +# Options defined in cinder.backup.driver +# + +# Backup metadata version to be used when backing up volume +# metadata. If this number is bumped, make sure the service +# doing the restore supports the new version. (integer value) +#backup_metadata_version=1 + + +# +# Options defined in cinder.backup.drivers.ceph +# + +# Ceph configuration file to use. (string value) +#backup_ceph_conf=/etc/ceph/ceph.conf + +# The Ceph user to connect with. Default here is to use the +# same user as for Cinder volumes. If not using cephx this +# should be set to None. (string value) +#backup_ceph_user=cinder + +# The chunk size, in bytes, that a backup is broken into +# before transfer to the Ceph object store. (integer value) +#backup_ceph_chunk_size=134217728 + +# The Ceph pool where volume backups are stored. (string +# value) +#backup_ceph_pool=backups + +# RBD stripe unit to use when creating a backup image. +# (integer value) +#backup_ceph_stripe_unit=0 + +# RBD stripe count to use when creating a backup image. +# (integer value) +#backup_ceph_stripe_count=0 + +# If True, always discard excess bytes when restoring volumes +# i.e. pad with zeroes. (boolean value) +#restore_discard_excess_bytes=true + + +# +# Options defined in cinder.backup.drivers.swift +# + +# The URL of the Swift endpoint (string value) +#backup_swift_url=http://localhost:8080/v1/AUTH_ + +# Swift authentication mechanism (string value) +#backup_swift_auth=per_user + +# Swift user name (string value) +#backup_swift_user= + +# Swift key for authentication (string value) +#backup_swift_key= + +# The default Swift container to use (string value) +#backup_swift_container=volumebackups + +# The size in bytes of Swift backup objects (integer value) +#backup_swift_object_size=52428800 + +# The number of retries to make for Swift operations (integer +# value) +#backup_swift_retry_attempts=3 + +# The backoff time in seconds between Swift retries (integer +# value) +#backup_swift_retry_backoff=2 + +# Compression algorithm (None to disable) (string value) +#backup_compression_algorithm=zlib + + +# +# Options defined in cinder.backup.drivers.tsm +# + +# Volume prefix for the backup id when backing up to TSM +# (string value) +#backup_tsm_volume_prefix=backup + +# TSM password for the running username (string value) +#backup_tsm_password=password + +# Enable or Disable compression for backups (boolean value) +#backup_tsm_compression=true + + +# +# Options defined in cinder.backup.manager +# + +# Driver to use for backups. 
(string value) +# Deprecated group/name - [DEFAULT]/backup_service +#backup_driver=cinder.backup.drivers.swift + + +# +# Options defined in cinder.common.config +# + +# File name for the paste.deploy config for cinder-api (string +# value) +#api_paste_config=api-paste.ini + +# Top-level directory for maintaining cinder's state (string +# value) +# Deprecated group/name - [DEFAULT]/pybasedir +#state_path=/var/lib/cinder + +# ip address of this host (string value) +my_ip={{ my_int_ip }} + +# default glance hostname or ip (string value) +glance_host={{ frontend_int_ip }} + +# default glance port (integer value) +#glance_port=9292 + +# A list of the glance api servers available to cinder +# ([hostname|ip]:port) (list value) +#glance_api_servers=$glance_host:$glance_port + +# Version of the glance api to use (integer value) +#glance_api_version=1 + +# Number retries when downloading an image from glance +# (integer value) +#glance_num_retries=0 + +# Allow to perform insecure SSL (https) requests to glance +# (boolean value) +#glance_api_insecure=false + +# Whether to attempt to negotiate SSL layer compression when +# using SSL (https) requests. Set to False to disable SSL +# layer compression. In some cases disabling this may improve +# data throughput, eg when high network bandwidth is available +# and you are using already compressed image formats such as +# qcow2 . (boolean value) +#glance_api_ssl_compression=false + +# http/https timeout value for glance operations. If no value +# (None) is supplied here, the glanceclient default value is +# used. (integer value) +#glance_request_timeout= + +# the topic scheduler nodes listen on (string value) +#scheduler_topic=cinder-scheduler + +# the topic volume nodes listen on (string value) +#volume_topic=cinder-volume + +# the topic volume backup nodes listen on (string value) +#backup_topic=cinder-backup + +# Deploy v1 of the Cinder API. (boolean value) +#enable_v1_api=true + +# Deploy v2 of the Cinder API. (boolean value) +#enable_v2_api=true + +# whether to rate limit the api (boolean value) +#api_rate_limit=true + +# Specify list of extensions to load when using +# osapi_volume_extension option with +# cinder.api.contrib.select_extensions (list value) +#osapi_volume_ext_list= + +# osapi volume extension to load (multi valued) +#osapi_volume_extension=cinder.api.contrib.standard_extensions + +# full class name for the Manager for volume (string value) +#volume_manager=cinder.volume.manager.VolumeManager + +# full class name for the Manager for volume backup (string +# value) +#backup_manager=cinder.backup.manager.BackupManager + +# full class name for the Manager for scheduler (string value) +#scheduler_manager=cinder.scheduler.manager.SchedulerManager + +# Name of this node. This can be an opaque identifier. It is +# not necessarily a hostname, FQDN, or IP address. (string +# value) +#host=cinder + +# availability zone of this node (string value) +#storage_availability_zone=nova + +# default availability zone to use when creating a new volume. +# If this is not set then we use the value from the +# storage_availability_zone option as the default +# availability_zone for new volumes. (string value) +#default_availability_zone= + +# default volume type to use (string value) +#default_volume_type= + +# time period to generate volume usages for. 
Time period must +# be hour, day, month or year (string value) +#volume_usage_audit_period=month + +# Path to the rootwrap configuration file to use for running +# commands as root (string value) +#rootwrap_config=/etc/cinder/rootwrap.conf + +# Enable monkey patching (boolean value) +#monkey_patch=false + +# List of modules/decorators to monkey patch (list value) +#monkey_patch_modules= + +# maximum time since last check-in for up service (integer +# value) +#service_down_time=60 + +# The full class name of the volume API class to use (string +# value) +#volume_api_class=cinder.volume.api.API + +# The full class name of the volume backup API class (string +# value) +#backup_api_class=cinder.backup.api.API + +# The strategy to use for auth. Supports noauth, keystone, and +# deprecated. (string value) +auth_strategy=keystone + +# A list of backend names to use. These backend names should +# be backed by a unique [CONFIG] group with its options (list +# value) +#enabled_backends= + +# Whether snapshots count against GigaByte quota (boolean +# value) +#no_snapshot_gb_quota=false + +# The full class name of the volume transfer API class (string +# value) +#transfer_api_class=cinder.transfer.api.API + + +# +# Options defined in cinder.compute +# + +# The full class name of the compute API class to use (string +# value) +#compute_api_class=cinder.compute.nova.API + + +# +# Options defined in cinder.compute.nova +# + +# Info to match when looking for nova in the service catalog. +# Format is : separated values of the form: +# :: (string value) +#nova_catalog_info=compute:nova:publicURL + +# Same as nova_catalog_info, but for admin endpoint. (string +# value) +#nova_catalog_admin_info=compute:nova:adminURL + +# Override service catalog lookup with template for nova +# endpoint e.g. http://localhost:8774/v2/%(project_id)s +# (string value) +#nova_endpoint_template= + +# Same as nova_endpoint_template, but for admin endpoint. +# (string value) +#nova_endpoint_admin_template= + +# region name of this node (string value) +#os_region_name= + +# Location of ca certificates file to use for nova client +# requests. (string value) +#nova_ca_certificates_file= + +# Allow to perform insecure SSL requests to nova (boolean +# value) +#nova_api_insecure=false + + +# +# Options defined in cinder.db.api +# + +# The backend to use for db (string value) +#db_backend=sqlalchemy + +# Services to be added to the available pool on create +# (boolean value) +#enable_new_services=true + +# Template string to be used to generate volume names (string +# value) +#volume_name_template=volume-%s + +# Template string to be used to generate snapshot names +# (string value) +#snapshot_name_template=snapshot-%s + +# Template string to be used to generate backup names (string +# value) +#backup_name_template=backup-%s + + +# +# Options defined in cinder.db.base +# + +# driver to use for database access (string value) +#db_driver=cinder.db + + +# +# Options defined in cinder.image.glance +# + +# A list of url schemes that can be downloaded directly via +# the direct_url. Currently supported schemes: [file]. 
(list +# value) +#allowed_direct_url_schemes= + + +# +# Options defined in cinder.image.image_utils +# + +# Directory used for temporary storage during image conversion +# (string value) +#image_conversion_dir=$state_path/conversion + + +# +# Options defined in cinder.openstack.common.db.sqlalchemy.session +# + +# the filename to use with sqlite (string value) +#sqlite_db=cinder.sqlite + +# If true, use synchronous mode for sqlite (boolean value) +#sqlite_synchronous=true + + +# +# Options defined in cinder.openstack.common.eventlet_backdoor +# + +# Enable eventlet backdoor. Acceptable values are 0, , +# and :, where 0 results in listening on a random +# tcp port number; results in listening on the +# specified port number (and not enabling backdoor if that +# port is in use); and : results in listening on +# the smallest unused port number within the specified range +# of port numbers. The chosen port is displayed in the +# service's log file. (string value) +#backdoor_port= + + +# +# Options defined in cinder.openstack.common.lockutils +# + +# Whether to disable inter-process locks (boolean value) +#disable_process_locking=false + +# Directory to use for lock files. Default to a temp directory +# (string value) +#lock_path= + + +# +# Options defined in cinder.openstack.common.log +# + +# Print debugging output (set logging level to DEBUG instead +# of default WARNING level). (boolean value) +debug={{ log_debug }} + +# Print more verbose output (set logging level to INFO instead +# of default WARNING level). (boolean value) +verbose={{ log_verbose }} + +# Log output to standard error (boolean value) +#use_stderr=true + +# Format string to use for log messages with context (string +# value) +#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s + +# Format string to use for log messages without context +# (string value) +#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s + +# Data to append to log format when level is DEBUG (string +# value) +#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d + +# Prefix each line of exception output with this format +# (string value) +#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s + +# List of logger=LEVEL pairs (list value) +#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN + +# Publish error events (boolean value) +#publish_errors=false + +# Make deprecations fatal (boolean value) +#fatal_deprecations=false + +# If an instance is passed with the log message, format it +# like this (string value) +#instance_format="[instance: %(uuid)s] " + +# If an instance UUID is passed with the log message, format +# it like this (string value) +#instance_uuid_format="[instance: %(uuid)s] " + +# The name of logging configuration file. It does not disable +# existing loggers, but just appends specified logging +# configuration to any other existing logging options. Please +# see the Python logging module documentation for details on +# logging configuration files. (string value) +# Deprecated group/name - [DEFAULT]/log_config +#log_config_append= + +# DEPRECATED. A logging.Formatter log message format string +# which may use any of the available logging.LogRecord +# attributes. This option is deprecated. 
Please use +# logging_context_format_string and +# logging_default_format_string instead. (string value) +#log_format= + +# Format string for %%(asctime)s in log records. Default: +# %(default)s (string value) +#log_date_format=%Y-%m-%d %H:%M:%S + +# (Optional) Name of log file to output to. If no default is +# set, logging will go to stdout. (string value) +# Deprecated group/name - [DEFAULT]/logfile +#log_file= + +# (Optional) The base directory used for relative --log-file +# paths (string value) +# Deprecated group/name - [DEFAULT]/logdir +#log_dir= + +# Use syslog for logging. Existing syslog format is DEPRECATED +# during I, and then will be changed in J to honor RFC5424 +# (boolean value) +#use_syslog=false + +# (Optional) Use syslog rfc5424 format for logging. If +# enabled, will add APP-NAME (RFC5424) before the MSG part of +# the syslog message. The old format without APP-NAME is +# deprecated in I, and will be removed in J. (boolean value) +#use_syslog_rfc_format=false + +# Syslog facility to receive log lines (string value) +#syslog_log_facility=LOG_USER + + +# +# Options defined in cinder.openstack.common.periodic_task +# + +# Some periodic tasks can be run in a separate process. Should +# we run them here? (boolean value) +#run_external_periodic_tasks=true + + +# +# Options defined in cinder.scheduler.driver +# + +# The scheduler host manager class to use (string value) +#scheduler_host_manager=cinder.scheduler.host_manager.HostManager + +# Maximum number of attempts to schedule an volume (integer +# value) +#scheduler_max_attempts=3 + + +# +# Options defined in cinder.scheduler.host_manager +# + +# Which filter class names to use for filtering hosts when not +# specified in the request. (list value) +#scheduler_default_filters=AvailabilityZoneFilter,CapacityFilter,CapabilitiesFilter + +# Which weigher class names to use for weighing hosts. (list +# value) +#scheduler_default_weighers=CapacityWeigher + + +# +# Options defined in cinder.scheduler.manager +# + +# Default scheduler driver to use (string value) +#scheduler_driver=cinder.scheduler.filter_scheduler.FilterScheduler + + +# +# Options defined in cinder.scheduler.scheduler_options +# + +# Absolute path to scheduler configuration JSON file. (string +# value) +#scheduler_json_config_location= + + +# +# Options defined in cinder.scheduler.simple +# + +# This configure option has been deprecated along with the +# SimpleScheduler. New scheduler is able to gather capacity +# information for each host, thus setting the maximum number +# of volume gigabytes for host is no longer needed. It's safe +# to remove this configure from cinder.conf. (integer value) +#max_gigabytes=10000 + + +# +# Options defined in cinder.scheduler.weights.capacity +# + +# Multiplier used for weighing volume capacity. Negative +# numbers mean to stack vs spread. (floating point value) +#capacity_weight_multiplier=1.0 + +# Multiplier used for weighing volume capacity. Negative +# numbers mean to stack vs spread. (floating point value) +#allocated_capacity_weight_multiplier=-1.0 + + +# +# Options defined in cinder.transfer.api +# + +# The number of characters in the salt. (integer value) +#volume_transfer_salt_length=8 + +# The number of characters in the autogenerated auth key. 
+# (integer value) +#volume_transfer_key_length=16 + + +# +# Options defined in cinder.volume.api +# + +# Create volume from snapshot at the host where snapshot +# resides (boolean value) +#snapshot_same_host=true + +# Ensure that the new volumes are the same AZ as snapshot or +# source volume (boolean value) +#cloned_volume_same_az=true + + +# +# Options defined in cinder.volume.driver +# + +# The maximum number of times to rescan iSER targetto find +# volume (integer value) +#num_iser_scan_tries=3 + +# The maximum number of iser target ids per host (integer +# value) +#iser_num_targets=100 + +# prefix for iser volumes (string value) +#iser_target_prefix=iqn.2010-10.org.iser.openstack: + +# The IP address that the iSER daemon is listening on (string +# value) +#iser_ip_address=$my_ip + +# The port that the iSER daemon is listening on (integer +# value) +#iser_port=3260 + +# iser target user-land tool to use (string value) +#iser_helper=tgtadm + +# number of times to attempt to run flakey shell commands +# (integer value) +#num_shell_tries=3 + +# The percentage of backend capacity is reserved (integer +# value) +#reserved_percentage=0 + +# The maximum number of iscsi target ids per host (integer +# value) +#iscsi_num_targets=100 + +# prefix for iscsi volumes (string value) +#iscsi_target_prefix=iqn.2010-10.org.openstack: + +# The IP address that the iSCSI daemon is listening on (string +# value) +iscsi_ip_address={{ cinder_volume_host }} + +# The port that the iSCSI daemon is listening on (integer +# value) +#iscsi_port=3260 + +# The maximum number of times to rescan targets to find volume +# (integer value) +# Deprecated group/name - [DEFAULT]/num_iscsi_scan_tries +#num_volume_device_scan_tries=3 + +# The backend name for a given driver implementation (string +# value) +#volume_backend_name= + +# Do we attach/detach volumes in cinder using multipath for +# volume to image and image to volume transfers? (boolean +# value) +#use_multipath_for_image_xfer=false + +# Method used to wipe old voumes (valid options are: none, +# zero, shred) (string value) +#volume_clear=zero + +# Size in MiB to wipe at start of old volumes. 0 => all +# (integer value) +#volume_clear_size=0 + +# The flag to pass to ionice to alter the i/o priority of the +# process used to zero a volume after deletion, for example +# "-c3" for idle only priority. (string value) +#volume_clear_ionice= + +# iscsi target user-land tool to use (string value) +iscsi_helper=tgtadm + +# Volume configuration file storage directory (string value) +#volumes_dir=$state_path/volumes + +# IET configuration file (string value) +#iet_conf=/etc/iet/ietd.conf + +# Comma-separated list of initiator IQNs allowed to connect to +# the iSCSI target. (From Nova compute nodes.) 
(string value) +#lio_initiator_iqns= + +# Sets the behavior of the iSCSI target to either perform +# blockio or fileio optionally, auto can be set and Cinder +# will autodetect type of backing device (string value) +#iscsi_iotype=fileio + +# The default block size used when copying/clearing volumes +# (string value) +#volume_dd_blocksize=1M + + +# +# Options defined in cinder.volume.drivers.block_device +# + +# List of all available devices (list value) +#available_devices= + + +# +# Options defined in cinder.volume.drivers.coraid +# + +# IP address of Coraid ESM (string value) +#coraid_esm_address= + +# User name to connect to Coraid ESM (string value) +#coraid_user=admin + +# Name of group on Coraid ESM to which coraid_user belongs +# (must have admin privilege) (string value) +#coraid_group=admin + +# Password to connect to Coraid ESM (string value) +#coraid_password=password + +# Volume Type key name to store ESM Repository Name (string +# value) +#coraid_repository_key=coraid_repository + + +# +# Options defined in cinder.volume.drivers.emc.emc_smis_common +# + +# use this file for cinder emc plugin config data (string +# value) +#cinder_emc_config_file=/etc/cinder/cinder_emc_config.xml + + +# +# Options defined in cinder.volume.drivers.emc.emc_vnx_cli +# + +# Naviseccli Path (string value) +#naviseccli_path= + +# ISCSI pool name (string value) +#storage_vnx_pool_name= + +# Default Time Out For CLI operations in minutes (integer +# value) +#default_timeout=20 + +# Default max number of LUNs in a storage group (integer +# value) +#max_luns_per_storage_group=256 + + +# +# Options defined in cinder.volume.drivers.eqlx +# + +# Group name to use for creating volumes (string value) +#eqlx_group_name=group-0 + +# Timeout for the Group Manager cli command execution (integer +# value) +#eqlx_cli_timeout=30 + +# Maximum retry count for reconnection (integer value) +#eqlx_cli_max_retries=5 + +# Use CHAP authentication for targets? (boolean value) +#eqlx_use_chap=false + +# Existing CHAP account name (string value) +#eqlx_chap_login=admin + +# Password for specified CHAP account name (string value) +#eqlx_chap_password=password + +# Pool in which volumes will be created (string value) +#eqlx_pool=default + + +# +# Options defined in cinder.volume.drivers.glusterfs +# + +# File with the list of available gluster shares (string +# value) +#glusterfs_shares_config=/etc/cinder/glusterfs_shares + +# Create volumes as sparsed files which take no space.If set +# to False volume is created as regular file.In such case +# volume creation takes a lot of time. (boolean value) +#glusterfs_sparsed_volumes=true + +# Create volumes as QCOW2 files rather than raw files. +# (boolean value) +#glusterfs_qcow2_volumes=false + +# Base dir containing mount points for gluster shares. (string +# value) +#glusterfs_mount_point_base=$state_path/mnt + + +# +# Options defined in cinder.volume.drivers.hds.hds +# + +# configuration file for HDS cinder plugin for HUS (string +# value) +#hds_cinder_config_file=/opt/hds/hus/cinder_hus_conf.xml + + +# +# Options defined in cinder.volume.drivers.huawei +# + +# config data for cinder huawei plugin (string value) +#cinder_huawei_conf_file=/etc/cinder/cinder_huawei_conf.xml + + +# +# Options defined in cinder.volume.drivers.ibm.gpfs +# + +# Specifies the path of the GPFS directory where Block Storage +# volume and snapshot files are stored. (string value) +#gpfs_mount_point_base= + +# Specifies the path of the Image service repository in GPFS. 
+# Leave undefined if not storing images in GPFS. (string +# value) +#gpfs_images_dir= + +# Specifies the type of image copy to be used. Set this when +# the Image service repository also uses GPFS so that image +# files can be transferred efficiently from the Image service +# to the Block Storage service. There are two valid values: +# "copy" specifies that a full copy of the image is made; +# "copy_on_write" specifies that copy-on-write optimization +# strategy is used and unmodified blocks of the image file are +# shared efficiently. (string value) +#gpfs_images_share_mode= + +# Specifies an upper limit on the number of indirections +# required to reach a specific block due to snapshots or +# clones. A lengthy chain of copy-on-write snapshots or +# clones can have a negative impact on performance, but +# improves space utilization. 0 indicates unlimited clone +# depth. (integer value) +#gpfs_max_clone_depth=0 + +# Specifies that volumes are created as sparse files which +# initially consume no space. If set to False, the volume is +# created as a fully allocated file, in which case, creation +# may take a significantly longer time. (boolean value) +#gpfs_sparse_volumes=true + +# Specifies the storage pool that volumes are assigned to. By +# default, the system storage pool is used. (string value) +#gpfs_storage_pool= + + +# +# Options defined in cinder.volume.drivers.ibm.storwize_svc +# + +# Storage system storage pool for volumes (string value) +#storwize_svc_volpool_name=volpool + +# Storage system space-efficiency parameter for volumes +# (percentage) (integer value) +#storwize_svc_vol_rsize=2 + +# Storage system threshold for volume capacity warnings +# (percentage) (integer value) +#storwize_svc_vol_warning=0 + +# Storage system autoexpand parameter for volumes (True/False) +# (boolean value) +#storwize_svc_vol_autoexpand=true + +# Storage system grain size parameter for volumes +# (32/64/128/256) (integer value) +#storwize_svc_vol_grainsize=256 + +# Storage system compression option for volumes (boolean +# value) +#storwize_svc_vol_compression=false + +# Enable Easy Tier for volumes (boolean value) +#storwize_svc_vol_easytier=true + +# The I/O group in which to allocate volumes (integer value) +#storwize_svc_vol_iogrp=0 + +# Maximum number of seconds to wait for FlashCopy to be +# prepared. Maximum value is 600 seconds (10 minutes) (integer +# value) +#storwize_svc_flashcopy_timeout=120 + +# Connection protocol (iSCSI/FC) (string value) +#storwize_svc_connection_protocol=iSCSI + +# Configure CHAP authentication for iSCSI connections +# (Default: Enabled) (boolean value) +#storwize_svc_iscsi_chap_enabled=true + +# Connect with multipath (FC only; iSCSI multipath is +# controlled by Nova) (boolean value) +#storwize_svc_multipath_enabled=false + +# Allows vdisk to multi host mapping (boolean value) +#storwize_svc_multihostmap_enabled=true + + +# +# Options defined in cinder.volume.drivers.ibm.xiv_ds8k +# + +# Proxy driver that connects to the IBM Storage Array (string +# value) +#xiv_ds8k_proxy=xiv_ds8k_openstack.nova_proxy.XIVDS8KNovaProxy + +# Connection type to the IBM Storage Array +# (fibre_channel|iscsi) (string value) +#xiv_ds8k_connection_type=iscsi + +# CHAP authentication mode, effective only for iscsi +# (disabled|enabled) (string value) +#xiv_chap=disabled + + +# +# Options defined in cinder.volume.drivers.lvm +# + +# Name for the VG that will contain exported volumes (string +# value) +volume_group={{ cinder_volume }} + +# If set, create lvms with multiple mirrors. 
Note that this +# requires lvm_mirrors + 2 pvs with available space (integer +# value) +#lvm_mirrors=0 + +# Type of LVM volumes to deploy; (default or thin) (string +# value) +#lvm_type=default + + +# +# Options defined in cinder.volume.drivers.netapp.options +# + +# The vFiler unit on which provisioning of block storage +# volumes will be done. This option is only used by the driver +# when connecting to an instance with a storage family of Data +# ONTAP operating in 7-Mode and the storage protocol selected +# is iSCSI. Only use this option when utilizing the MultiStore +# feature on the NetApp storage system. (string value) +#netapp_vfiler= + +# Administrative user account name used to access the storage +# system or proxy server. (string value) +#netapp_login= + +# Password for the administrative user account specified in +# the netapp_login option. (string value) +#netapp_password= + +# This option specifies the virtual storage server (Vserver) +# name on the storage cluster on which provisioning of block +# storage volumes should occur. If using the NFS storage +# protocol, this parameter is mandatory for storage service +# catalog support (utilized by Cinder volume type extra_specs +# support). If this option is specified, the exports belonging +# to the Vserver will only be used for provisioning in the +# future. Block storage volumes on exports not belonging to +# the Vserver specified by this option will continue to +# function normally. (string value) +#netapp_vserver= + +# The hostname (or IP address) for the storage system or proxy +# server. (string value) +#netapp_server_hostname= + +# The TCP port to use for communication with the storage +# system or proxy server. Traditionally, port 80 is used for +# HTTP and port 443 is used for HTTPS; however, this value +# should be changed if an alternate port has been configured +# on the storage system or proxy server. (integer value) +#netapp_server_port=80 + +# This option is used to specify the path to the E-Series +# proxy application on a proxy server. The value is combined +# with the value of the netapp_transport_type, +# netapp_server_hostname, and netapp_server_port options to +# create the URL used by the driver to connect to the proxy +# application. (string value) +#netapp_webservice_path=/devmgr/v2 + +# This option is only utilized when the storage family is +# configured to eseries. This option is used to restrict +# provisioning to the specified controllers. Specify the value +# of this option to be a comma separated list of controller +# hostnames or IP addresses to be used for provisioning. +# (string value) +#netapp_controller_ips= + +# Password for the NetApp E-Series storage array. (string +# value) +#netapp_sa_password= + +# This option is used to restrict provisioning to the +# specified storage pools. Only dynamic disk pools are +# currently supported. Specify the value of this option to be +# a comma separated list of disk pool names to be used for +# provisioning. (string value) +#netapp_storage_pools= + +# If the percentage of available space for an NFS share has +# dropped below the value specified by this option, the NFS +# image cache will be cleaned. (integer value) +#thres_avl_size_perc_start=20 + +# When the percentage of available space on an NFS share has +# reached the percentage specified by this option, the driver +# will stop clearing files from the NFS image cache that have +# not been accessed in the last M minutes, where M is the +# value of the expiry_thres_minutes configuration option. 
+# (integer value) +#thres_avl_size_perc_stop=60 + +# This option specifies the threshold for last access time for +# images in the NFS image cache. When a cache cleaning cycle +# begins, images in the cache that have not been accessed in +# the last M minutes, where M is the value of this parameter, +# will be deleted from the cache to create free space on the +# NFS share. (integer value) +#expiry_thres_minutes=720 + +# This option specifies the path of the NetApp copy offload +# tool binary. Ensure that the binary has execute permissions +# set which allow the effective user of the cinder-volume +# process to execute the file. (string value) +#netapp_copyoffload_tool_path= + +# The quantity to be multiplied by the requested volume size +# to ensure enough space is available on the virtual storage +# server (Vserver) to fulfill the volume creation request. +# (floating point value) +#netapp_size_multiplier=1.2 + +# This option is only utilized when the storage protocol is +# configured to use iSCSI. This option is used to restrict +# provisioning to the specified controller volumes. Specify +# the value of this option to be a comma separated list of +# NetApp controller volume names to be used for provisioning. +# (string value) +#netapp_volume_list= + +# The storage family type used on the storage system; valid +# values are ontap_7mode for using Data ONTAP operating in +# 7-Mode, ontap_cluster for using clustered Data ONTAP, or +# eseries for using E-Series. (string value) +#netapp_storage_family=ontap_cluster + +# The storage protocol to be used on the data path with the +# storage system; valid values are iscsi or nfs. (string +# value) +#netapp_storage_protocol= + +# The transport protocol used when communicating with the +# storage system or proxy server. Valid values are http or +# https. (string value) +#netapp_transport_type=http + + +# +# Options defined in cinder.volume.drivers.nexenta.options +# + +# IP address of Nexenta SA (string value) +#nexenta_host= + +# HTTP port to connect to Nexenta REST API server (integer +# value) +#nexenta_rest_port=2000 + +# Use http or https for REST connection (default auto) (string +# value) +#nexenta_rest_protocol=auto + +# User name to connect to Nexenta SA (string value) +#nexenta_user=admin + +# Password to connect to Nexenta SA (string value) +#nexenta_password=nexenta + +# Nexenta target portal port (integer value) +#nexenta_iscsi_target_portal_port=3260 + +# pool on SA that will hold all volumes (string value) +#nexenta_volume=cinder + +# IQN prefix for iSCSI targets (string value) +#nexenta_target_prefix=iqn.1986-03.com.sun:02:cinder- + +# prefix for iSCSI target groups on SA (string value) +#nexenta_target_group_prefix=cinder/ + +# File with the list of available nfs shares (string value) +#nexenta_shares_config=/etc/cinder/nfs_shares + +# Base dir containing mount points for nfs shares (string +# value) +#nexenta_mount_point_base=$state_path/mnt + +# Create volumes as sparsed files which take no space.If set +# to False volume is created as regular file.In such case +# volume creation takes a lot of time. (boolean value) +#nexenta_sparsed_volumes=true + +# Default compression value for new ZFS folders. (string +# value) +#nexenta_volume_compression=on + +# If set True cache NexentaStor appliance volroot option +# value. (boolean value) +#nexenta_nms_cache_volroot=true + +# Enable stream compression, level 1..9. 1 - gives best speed; +# 9 - gives best compression. 
(integer value) +#nexenta_rrmgr_compression=0 + +# TCP Buffer size in KiloBytes. (integer value) +#nexenta_rrmgr_tcp_buf_size=4096 + +# Number of TCP connections. (integer value) +#nexenta_rrmgr_connections=2 + +# block size for volumes (blank=default,8KB) (string value) +#nexenta_blocksize= + +# flag to create sparse volumes (boolean value) +#nexenta_sparse=false + + +# +# Options defined in cinder.volume.drivers.nfs +# + +# IP address or Hostname of NAS system. (string value) +#nas_ip= + +# User name to connect to NAS system. (string value) +#nas_login=admin + +# Password to connect to NAS system. (string value) +#nas_password= + +# SSH port to use to connect to NAS system. (integer value) +#nas_ssh_port=22 + +# Filename of private key to use for SSH authentication. +# (string value) +#nas_private_key= + +# File with the list of available nfs shares (string value) +#nfs_shares_config=/etc/cinder/nfs_shares + +# Create volumes as sparsed files which take no space.If set +# to False volume is created as regular file.In such case +# volume creation takes a lot of time. (boolean value) +#nfs_sparsed_volumes=true + +# Percent of ACTUAL usage of the underlying volume before no +# new volumes can be allocated to the volume destination. +# (floating point value) +#nfs_used_ratio=0.95 + +# This will compare the allocated to available space on the +# volume destination. If the ratio exceeds this number, the +# destination will no longer be valid. (floating point value) +#nfs_oversub_ratio=1.0 + +# Base dir containing mount points for nfs shares. (string +# value) +#nfs_mount_point_base=$state_path/mnt + +# Mount options passed to the nfs client. See section of the +# nfs man page for details. (string value) +#nfs_mount_options= + + +# +# Options defined in cinder.volume.drivers.rbd +# + +# the RADOS pool in which rbd volumes are stored (string +# value) +#rbd_pool=rbd + +# the RADOS client name for accessing rbd volumes - only set +# when using cephx authentication (string value) +#rbd_user= + +# path to the ceph configuration file to use (string value) +#rbd_ceph_conf= + +# flatten volumes created from snapshots to remove dependency +# (boolean value) +#rbd_flatten_volume_from_snapshot=false + +# the libvirt uuid of the secret for the rbd_uservolumes +# (string value) +#rbd_secret_uuid= + +# where to store temporary image files if the volume driver +# does not write them directly to the volume (string value) +#volume_tmp_dir= + +# maximum number of nested clones that can be taken of a +# volume before enforcing a flatten prior to next clone. A +# value of zero disables cloning (integer value) +#rbd_max_clone_depth=5 + + +# +# Options defined in cinder.volume.drivers.san.hp.hp_3par_common +# + +# 3PAR WSAPI Server Url like https://<3par ip>:8080/api/v1 +# (string value) +#hp3par_api_url= + +# 3PAR Super user username (string value) +#hp3par_username= + +# 3PAR Super user password (string value) +#hp3par_password= + +# The CPG to use for volume creation (string value) +#hp3par_cpg=OpenStack + +# The CPG to use for Snapshots for volumes. If empty +# hp3par_cpg will be used (string value) +#hp3par_cpg_snap= + +# The time in hours to retain a snapshot. You can't delete it +# before this expires. (string value) +#hp3par_snapshot_retention= + +# The time in hours when a snapshot expires and is deleted. +# This must be larger than expiration (string value) +#hp3par_snapshot_expiration= + +# Enable HTTP debugging to 3PAR (boolean value) +#hp3par_debug=false + +# List of target iSCSI addresses to use. 
(list value) +#hp3par_iscsi_ips= + + +# +# Options defined in cinder.volume.drivers.san.hp.hp_lefthand_rest_proxy +# + +# HP LeftHand WSAPI Server Url like https://:8081/lhos (string value) +#hplefthand_api_url= + +# HP LeftHand Super user username (string value) +#hplefthand_username= + +# HP LeftHand Super user password (string value) +#hplefthand_password= + +# HP LeftHand cluster name (string value) +#hplefthand_clustername= + +# Configure CHAP authentication for iSCSI connections +# (Default: Disabled) (boolean value) +#hplefthand_iscsi_chap_enabled=false + +# Enable HTTP debugging to LeftHand (boolean value) +#hplefthand_debug=false + + +# +# Options defined in cinder.volume.drivers.san.hp.hp_msa_common +# + +# The VDisk to use for volume creation. (string value) +#msa_vdisk=OpenStack + + +# +# Options defined in cinder.volume.drivers.san.san +# + +# Use thin provisioning for SAN volumes? (boolean value) +#san_thin_provision=true + +# IP address of SAN controller (string value) +#san_ip= + +# Username for SAN controller (string value) +#san_login=admin + +# Password for SAN controller (string value) +#san_password= + +# Filename of private key to use for SSH authentication +# (string value) +#san_private_key= + +# Cluster name to use for creating volumes (string value) +#san_clustername= + +# SSH port to use with SAN (integer value) +#san_ssh_port=22 + +# Execute commands locally instead of over SSH; use if the +# volume service is running on the SAN device (boolean value) +#san_is_local=false + +# SSH connection timeout in seconds (integer value) +#ssh_conn_timeout=30 + +# Minimum ssh connections in the pool (integer value) +#ssh_min_pool_conn=1 + +# Maximum ssh connections in the pool (integer value) +#ssh_max_pool_conn=5 + + +# +# Options defined in cinder.volume.drivers.san.solaris +# + +# The ZFS path under which to create zvols for volumes. +# (string value) +#san_zfs_volume_base=rpool/ + + +# +# Options defined in cinder.volume.drivers.scality +# + +# Path or URL to Scality SOFS configuration file (string +# value) +#scality_sofs_config= + +# Base dir where Scality SOFS shall be mounted (string value) +#scality_sofs_mount_point=$state_path/scality + +# Path from Scality SOFS root to volume dir (string value) +#scality_sofs_volume_dir=cinder/volumes + + +# +# Options defined in cinder.volume.drivers.solidfire +# + +# Set 512 byte emulation on volume creation; (boolean value) +#sf_emulate_512=true + +# Allow tenants to specify QOS on create (boolean value) +#sf_allow_tenant_qos=false + +# Create SolidFire accounts with this prefix. Any string can +# be used here, but the string "hostname" is special and will +# create a prefix using the cinder node hostsname (previous +# default behavior). The default is NO prefix. (string value) +#sf_account_prefix= + +# SolidFire API port. Useful if the device api is behind a +# proxy on a different port. (integer value) +#sf_api_port=443 + + +# +# Options defined in cinder.volume.drivers.vmware.vmdk +# + +# IP address for connecting to VMware ESX/VC server. (string +# value) +#vmware_host_ip= + +# Username for authenticating with VMware ESX/VC server. +# (string value) +#vmware_host_username= + +# Password for authenticating with VMware ESX/VC server. +# (string value) +#vmware_host_password= + +# Optional VIM service WSDL Location e.g +# http:///vimService.wsdl. Optional over-ride to +# default location for bug work-arounds. 
(string value) +#vmware_wsdl_location= + +# Number of times VMware ESX/VC server API must be retried +# upon connection related issues. (integer value) +#vmware_api_retry_count=10 + +# The interval (in seconds) for polling remote tasks invoked +# on VMware ESX/VC server. (integer value) +#vmware_task_poll_interval=5 + +# Name for the folder in the VC datacenter that will contain +# cinder volumes. (string value) +#vmware_volume_folder=cinder-volumes + +# Timeout in seconds for VMDK volume transfer between Cinder +# and Glance. (integer value) +#vmware_image_transfer_timeout_secs=7200 + +# Max number of objects to be retrieved per batch. Query +# results will be obtained in batches from the server and not +# in one shot. Server may still limit the count to something +# less than the configured value. (integer value) +#vmware_max_objects_retrieval=100 + +# Optional string specifying the VMware VC server version. The +# driver attempts to retrieve the version from VMware VC +# server. Set this configuration only if you want to override +# the VC server version. (string value) +#vmware_host_version= + + +# +# Options defined in cinder.volume.drivers.windows.windows +# + +# Path to store VHD backed volumes (string value) +#windows_iscsi_lun_path=C:\iSCSIVirtualDisks + + +# +# Options defined in cinder.volume.drivers.xenapi.sm +# + +# NFS server to be used by XenAPINFSDriver (string value) +#xenapi_nfs_server= + +# Path of exported NFS, used by XenAPINFSDriver (string value) +#xenapi_nfs_serverpath= + +# URL for XenAPI connection (string value) +#xenapi_connection_url= + +# Username for XenAPI connection (string value) +#xenapi_connection_username=root + +# Password for XenAPI connection (string value) +#xenapi_connection_password= + +# Base path to the storage repository (string value) +#xenapi_sr_base_path=/var/run/sr-mount + + +# +# Options defined in cinder.volume.drivers.zadara +# + +# Management IP of Zadara VPSA (string value) +#zadara_vpsa_ip= + +# Zadara VPSA port number (string value) +#zadara_vpsa_port= + +# Use SSL connection (boolean value) +#zadara_vpsa_use_ssl=false + +# User name for the VPSA (string value) +#zadara_user= + +# Password for the VPSA (string value) +#zadara_password= + +# Name of VPSA storage pool for volumes (string value) +#zadara_vpsa_poolname= + +# Default thin provisioning policy for volumes (boolean value) +#zadara_vol_thin=true + +# Default encryption policy for volumes (boolean value) +#zadara_vol_encrypt=false + +# Default template for VPSA volume names (string value) +#zadara_vol_name_template=OS_%s + +# Automatically detach from servers on volume delete (boolean +# value) +#zadara_vpsa_auto_detach_on_delete=true + +# Don't halt on deletion of non-existing volumes (boolean +# value) +#zadara_vpsa_allow_nonexistent_delete=true + + +# +# Options defined in cinder.volume.manager +# + +# Driver to use for volume creation (string value) +volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver + +# Timeout for creating the volume to migrate to when +# performing volume migration (seconds) (integer value) +#migration_create_volume_timeout_secs=300 + +# Offload pending volume delete during volume service startup +# (boolean value) +#volume_service_inithost_offload=false + +# FC Zoning mode configured (string value) +#zoning_mode=none + +# User defined capabilities, a JSON formatted string +# specifying key/value pairs. 
(string value) +#extra_capabilities={} + + +[BRCD_FABRIC_EXAMPLE] + +# +# Options defined in cinder.zonemanager.drivers.brocade.brcd_fabric_opts +# + +# Management IP of fabric (string value) +#fc_fabric_address= + +# Fabric user ID (string value) +#fc_fabric_user= + +# Password for user (string value) +#fc_fabric_password= + +# Connecting port (integer value) +#fc_fabric_port=22 + +# overridden zoning policy (string value) +#zoning_policy=initiator-target + +# overridden zoning activation state (boolean value) +#zone_activate=true + +# overridden zone name prefix (string value) +#zone_name_prefix= + +# Principal switch WWN of the fabric (string value) +#principal_switch_wwn= + + +[database] + +# +# Options defined in cinder.openstack.common.db.api +# + +# The backend to use for db (string value) +# Deprecated group/name - [DEFAULT]/db_backend +#backend=sqlalchemy + +# Enable the experimental use of thread pooling for all DB API +# calls (boolean value) +# Deprecated group/name - [DEFAULT]/dbapi_use_tpool +#use_tpool=false + + +# +# Options defined in cinder.openstack.common.db.sqlalchemy.session +# + +# The SQLAlchemy connection string used to connect to the +# database (string value) +# Deprecated group/name - [DEFAULT]/sql_connection +#connection=sqlite:///$state_path/$sqlite_db +connection=mysql://cinder:{{ cinder_db_password }}@{{ sql_host }}/cinder + + +# timeout before idle sql connections are reaped (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_idle_timeout +idle_timeout=200 + +# Minimum number of SQL connections to keep open in a pool +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_min_pool_size +#min_pool_size=1 + +# Maximum number of SQL connections to keep open in a pool +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_pool_size +#max_pool_size=5 + +# maximum db connection retries during startup. (setting -1 +# implies an infinite retry count) (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_retries +#max_retries=10 + +# interval between retries of opening a sql connection +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_retry_interval +#retry_interval=10 + +# If set, use this value for max_overflow with sqlalchemy +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_overflow +#max_overflow= + +# Verbosity of SQL debugging information. 0=None, +# 100=Everything (integer value) +# Deprecated group/name - [DEFAULT]/sql_connection_debug +#connection_debug=0 + +# Add python stack traces to SQL as comment strings (boolean +# value) +# Deprecated group/name - [DEFAULT]/sql_connection_trace +#connection_trace=false + + +[fc-zone-manager] + +# +# Options defined in cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver +# + +# Southbound connector for zoning operation (string value) +#brcd_sb_connector=cinder.zonemanager.drivers.brocade.brcd_fc_zone_client_cli.BrcdFCZoneClientCLI + + +# +# Options defined in cinder.zonemanager.fc_zone_manager +# + +# FC Zone Driver responsible for zone management (string +# value) +#zone_driver=cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver.BrcdFCZoneDriver + +# Zoning policy configured by user (string value) +#zoning_policy=initiator-target + +# Comma separated list of fibre channel fabric names. 
This +# list of names is used to retrieve other SAN credentials for +# connecting to each SAN fabric (string value) +#fc_fabric_names= + +# FC San Lookup Service (string value) +#fc_san_lookup_service=cinder.zonemanager.drivers.brocade.brcd_fc_san_lookup_service.BrcdFCSanLookupService + + +[keymgr] + +# +# Options defined in cinder.keymgr +# + +# The full class name of the key manager API class (string +# value) +#api_class=cinder.keymgr.conf_key_mgr.ConfKeyManager + + +# +# Options defined in cinder.keymgr.conf_key_mgr +# + +# Fixed key returned by key manager, specified in hex (string +# value) +#fixed_key= + + +[keystone_authtoken] +auth_host={{ frontend_int_ip }} +auth_port=35357 +auth_protocol=http +admin_tenant_name={{ service_tenant }} +admin_user=cinder +admin_password={{ cinder_identity_password }} +identity_uri=http://{{ frontend_int_ip }}:35357 +auth_uri=http://{{ frontend_ext_ip }}:5000 + +# +# Options defined in keystoneclient.middleware.auth_token +# + +# Prefix to prepend at the beginning of the path (string +# value) +#auth_admin_prefix= + +# Host providing the admin Identity API endpoint (string +# value) +#auth_host=127.0.0.1 + +# Port of the admin Identity API endpoint (integer value) +#auth_port=35357 + +# Protocol of the admin Identity API endpoint(http or https) +# (string value) +#auth_protocol=https + +# Complete public Identity API endpoint (string value) +#auth_uri= + +# API version of the admin Identity API endpoint (string +# value) +#auth_version= + +# Do not handle authorization requests within the middleware, +# but delegate the authorization decision to downstream WSGI +# components (boolean value) +#delay_auth_decision=false + +# Request timeout value for communicating with Identity API +# server. (boolean value) +#http_connect_timeout= + +# How many times are we trying to reconnect when communicating +# with Identity API Server. (integer value) +#http_request_max_retries=3 + +# Single shared secret with the Keystone configuration used +# for bootstrapping a Keystone installation, or otherwise +# bypassing the normal authentication process. (string value) +#admin_token= + +# Keystone account username (string value) +#admin_user= + +# Keystone account password (string value) +#admin_password= + +# Keystone service account tenant name to validate user tokens +# (string value) +#admin_tenant_name=admin + +# Env key for the swift cache (string value) +#cache= + +# Required if Keystone server requires client certificate +# (string value) +#certfile= + +# Required if Keystone server requires client certificate +# (string value) +#keyfile= + +# A PEM encoded Certificate Authority to use when verifying +# HTTPs connections. Defaults to system CAs. (string value) +#cafile= + +# Verify HTTPS connections. (boolean value) +#insecure=false + +# Directory used to cache files related to PKI tokens (string +# value) +#signing_dir= + +# Optionally specify a list of memcached server(s) to use for +# caching. If left undefined, tokens will instead be cached +# in-process. (list value) +# Deprecated group/name - [DEFAULT]/memcache_servers +#memcached_servers= + +# In order to prevent excessive effort spent validating +# tokens, the middleware caches previously-seen tokens for a +# configurable duration (in seconds). Set to -1 to disable +# caching completely. (integer value) +#token_cache_time=300 + +# Determines the frequency at which the list of revoked tokens +# is retrieved from the Identity service (in seconds). 
A high +# number of revocation events combined with a low cache +# duration may significantly reduce performance. (integer +# value) +#revocation_cache_time=300 + +# (optional) if defined, indicate whether token data should be +# authenticated or authenticated and encrypted. Acceptable +# values are MAC or ENCRYPT. If MAC, token data is +# authenticated (with HMAC) in the cache. If ENCRYPT, token +# data is encrypted and authenticated in the cache. If the +# value is not one of these options or empty, auth_token will +# raise an exception on initialization. (string value) +#memcache_security_strategy= + +# (optional, mandatory if memcache_security_strategy is +# defined) this string is used for key derivation. (string +# value) +#memcache_secret_key= + +# (optional) indicate whether to set the X-Service-Catalog +# header. If False, middleware will not ask for service +# catalog on token validation and will not set the X-Service- +# Catalog header. (boolean value) +#include_service_catalog=true + +# Used to control the use and type of token binding. Can be +# set to: "disabled" to not check token binding. "permissive" +# (default) to validate binding information if the bind type +# is of a form known to the server and ignore it if not. +# "strict" like "permissive" but if the bind type is unknown +# the token will be rejected. "required" any form of token +# binding is needed to be allowed. Finally the name of a +# binding method that must be present in tokens. (string +# value) +#enforce_token_bind=permissive + + +[matchmaker_ring] + +# +# Options defined in oslo.messaging +# + +# Matchmaker ring file (JSON). (string value) +# Deprecated group/name - [DEFAULT]/matchmaker_ringfile +#ringfile=/etc/oslo/matchmaker_ring.json + + +[ssl] + +# +# Options defined in cinder.openstack.common.sslutils +# + +# CA certificate file to use to verify connecting clients +# (string value) +#ca_file= + +# Certificate file to use when starting the server securely +# (string value) +#cert_file= + +# Private key file to use when starting the server securely +# (string value) +#key_file= + + diff --git a/tools/ansible-openstack/templates/etc/glance/glance-api.conf b/tools/ansible-openstack/templates/etc/glance/glance-api.conf new file mode 100644 index 0000000..9a2b450 --- /dev/null +++ b/tools/ansible-openstack/templates/etc/glance/glance-api.conf @@ -0,0 +1,674 @@ +[DEFAULT] +# Show more verbose log output (sets INFO log level output) +verbose={{ log_verbose }} + +# Show debugging output in logs (sets DEBUG log level output) +debug={{ log_debug }} + +# Which backend scheme should Glance use by default is not specified +# in a request to add a new image to Glance? Known schemes are determined +# by the known_stores option below. +# Default: 'file' +default_store=file + +# List of which store classes and store class locations are +# currently known to glance at startup. +# Existing but disabled stores: +# glance.store.rbd.Store, +# glance.store.s3.Store, +# glance.store.swift.Store, +# glance.store.cinder.Store, +# glance.store.gridfs.Store, +#known_stores=glance.store.filesystem.Store, +# glance.store.http.Store +known_stores=glance.store.filesystem.Store + +# Maximum image size (in bytes) that may be uploaded through the +# Glance API server. Defaults to 1 TB. +# WARNING: this value should only be increased after careful consideration +# and must be set to a value under 8 EB (9223372036854775808). 
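+# For example, to offer the HTTP store alongside the filesystem store, the
+# list could be extended as follows (illustrative only; both class paths are
+# taken from the commented list above):
+# known_stores=glance.store.filesystem.Store,
+#              glance.store.http.Store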
+#image_size_cap=1099511627776 + +# Address to bind the API server +#bind_host=0.0.0.0 + +# Port the bind the API server to +#bind_port=9292 + +# Log to this file. Make sure you do not set the same log file for both the API +# and registry servers! +# +# If `log_file` is omitted and `use_syslog` is false, then log messages are +# sent to stdout as a fallback. +#log_file=/var/log/glance/api.log + +# Backlog requests when creating socket +#backlog=4096 + +# TCP_KEEPIDLE value in seconds when creating socket. +# Not supported on OS X. +#tcp_keepidle=600 + +# API to use for accessing data. Default value points to sqlalchemy +# package, it is also possible to use: glance.db.registry.api +# data_api = glance.db.sqlalchemy.api + +# Number of Glance API worker processes to start. +# On machines with more than one CPU increasing this value +# may improve performance (especially if using SSL with +# compression turned on). It is typically recommended to set +# this value to the number of CPUs present on your machine. +#workers=1 + +# Maximum line size of message headers to be accepted. +# max_header_line may need to be increased when using large tokens +# (typically those generated by the Keystone v3 API with big service +# catalogs) +# max_header_line = 16384 + +# Role used to identify an authenticated user as administrator +#admin_role=admin + +# Allow unauthenticated users to access the API with read-only +# privileges. This only applies when using ContextMiddleware. +#allow_anonymous_access=False + +# Allow access to version 1 of glance api +#enable_v1_api=True + +# Allow access to version 2 of glance api +#enable_v2_api=True + +# Return the URL that references where the data is stored on +# the backend storage system. For example, if using the +# file system store a URL of 'file:///path/to/image' will +# be returned to the user in the 'direct_url' meta-data field. +# The default value is false. +#show_image_direct_url=False + +# Send headers containing user and tenant information when making requests to +# the v1 glance registry. This allows the registry to function as if a user is +# authenticated without the need to authenticate a user itself using the +# auth_token middleware. +# The default value is false. +#send_identity_headers=False + +# Supported values for the 'container_format' image attribute +#container_formats=ami,ari,aki,bare,ovf,ova + +# Supported values for the 'disk_format' image attribute +#disk_formats=ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso + +# Directory to use for lock files. Default to a temp directory +# (string value). This setting needs to be the same for both +# glance-scrubber and glance-api. +#lock_path= + +# Property Protections config file +# This file contains the rules for property protections and the roles/policies +# associated with it. +# If this config value is not specified, by default, property protections +# won't be enforced. +# If a value is specified and the file is not found, then the glance-api +# service will not start. +#property_protection_file = + +# Specify whether 'roles' or 'policies' are used in the +# property_protection_file. +# The default value for property_protection_rule_format is 'roles'. +#property_protection_rule_format=roles + +# Specifies how long (in hours) a task is supposed to live in the tasks DB +# after succeeding or failing before getting soft-deleted. +# The default value for task_time_to_live is 48 hours. +# task_time_to_live = 48 + +# This value sets what strategy will be used to determine the image location +# order. 
Currently two strategies are packaged with Glance 'location_order' +# and 'store_type'. +#location_strategy=location_order + +# ================= Syslog Options ============================ + +# Send logs to syslog (/dev/log) instead of to file specified +# by `log_file` +#use_syslog=False + +# Facility to use. If unset defaults to LOG_USER. +#syslog_log_facility=LOG_LOCAL0 + +# ================= SSL Options =============================== + +# Certificate file to use when starting API server securely +#cert_file=/path/to/certfile + +# Private key file to use when starting API server securely +#key_file=/path/to/keyfile + +# CA certificate file to use to verify connecting clients +#ca_file=/path/to/cafile + +# ================= Security Options ========================== + +# AES key for encrypting store 'location' metadata, including +# -- if used -- Swift or S3 credentials +# Should be set to a random string of length 16, 24 or 32 bytes +#metadata_encryption_key=<16, 24 or 32 char registry metadata key> + +# ============ Registry Options =============================== + +# Address to find the registry server +registry_host={{ glance_registry_host }} + +# Port the registry server is listening on +#registry_port=9191 + +# What protocol to use when connecting to the registry server? +# Set to https for secure HTTP communication +#registry_client_protocol=http + +# The path to the key file to use in SSL connections to the +# registry server, if any. Alternately, you may set the +# GLANCE_CLIENT_KEY_FILE environ variable to a filepath of the key file +#registry_client_key_file=/path/to/key/file + +# The path to the cert file to use in SSL connections to the +# registry server, if any. Alternately, you may set the +# GLANCE_CLIENT_CERT_FILE environ variable to a filepath of the cert file +#registry_client_cert_file=/path/to/cert/file + +# The path to the certifying authority cert file to use in SSL connections +# to the registry server, if any. Alternately, you may set the +# GLANCE_CLIENT_CA_FILE environ variable to a filepath of the CA cert file +#registry_client_ca_file=/path/to/ca/file + +# When using SSL in connections to the registry server, do not require +# validation via a certifying authority. This is the registry's equivalent of +# specifying --insecure on the command line using glanceclient for the API +# Default: False +#registry_client_insecure=False + +# The period of time, in seconds, that the API server will wait for a registry +# request to complete. A value of '0' implies no timeout. +# Default: 600 +#registry_client_timeout=600 + +# Whether to automatically create the database tables. +# Default: False +#db_auto_create=False + +# Enable DEBUG log messages from sqlalchemy which prints every database +# query and response. +# Default: False +#sqlalchemy_debug=True + +# Pass the user's token through for API requests to the registry. +# Default: True +#use_user_token=True + +# If 'use_user_token' is not in effect then admin credentials +# can be specified. Requests to the registry on behalf of +# the API will use these credentials. +# Admin user name +#admin_user=%SERVICE_USER% +# Admin password +#admin_password=%SERVICE_PASSWORD% +# Admin tenant name +#admin_tenant_name=%SERVICE_TENANT_NAME% +# Keystone endpoint +#auth_url=None +# Keystone region +#auth_region=None +# Auth strategy +#auth_strategy=keystone + +# ============ Notification System Options ===================== + +# Notifications can be sent when images are create, updated or deleted. 
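+# As a hedged example (the exact driver name or alias depends on the
+# installed Glance/oslo.messaging version), notifications can be routed to
+# the RabbitMQ settings below with something like:
+# notification_driver = messaging
+# default_publisher_id = image.localhost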
+# There are three methods of sending notifications, logging (via the +# log_file directive), rabbit (via a rabbitmq queue), qpid (via a Qpid +# message queue), or noop (no notifications sent, the default) +# NOTE: THIS CONFIGURATION OPTION HAS BEEN DEPRECATED IN FAVOR OF `notification_driver` +# notifier_strategy = default + +# Driver or drivers to handle sending notifications +# notification_driver = noop + +# Default publisher_id for outgoing notifications. +# default_publisher_id = image.localhost + +# Configuration options if sending notifications via rabbitmq (these are +# the defaults) +#rabbit_host=localhost +#rabbit_port=5672 +rabbit_hosts={{ amqp_host }}:5672 +#rabbit_use_ssl=false +#rabbit_userid=guest +rabbit_userid={{ amqp_user }} +#rabbit_password=guest +rabbit_password={{ amqp_pass }} +#rabbit_virtual_host=/ +#rabbit_notification_exchange=glance +#rabbit_notification_topic=notifications +#rabbit_durable_queues=False + +# Configuration options if sending notifications via Qpid (these are +# the defaults) +#qpid_notification_exchange=glance +#qpid_notification_topic=notifications +#qpid_hostname=localhost +#qpid_port=5672 +#qpid_username= +#qpid_password= +#qpid_sasl_mechanisms= +#qpid_reconnect_timeout=0 +#qpid_reconnect_limit=0 +#qpid_reconnect_interval_min=0 +#qpid_reconnect_interval_max=0 +#qpid_reconnect_interval=0 +#qpid_heartbeat=5 +# Set to 'ssl' to enable SSL +#qpid_protocol=tcp +#qpid_tcp_nodelay=True + +# ============ Filesystem Store Options ======================== + +# Directory that the Filesystem backend store +# writes image data to +#filesystem_store_datadir=/var/lib/glance/images/ + +# A list of directories where image data can be stored. +# This option may be specified multiple times for specifying multiple store +# directories. Either one of filesystem_store_datadirs or +# filesystem_store_datadir option is required. A priority number may be given +# after each directory entry, separated by a ":". +# When adding an image, the highest priority directory will be selected, unless +# there is not enough space available in cases where the image size is already +# known. If no priority is given, it is assumed to be zero and the directory +# will be considered for selection last. If multiple directories have the same +# priority, then the one with the most free space available is selected. +# If same store is specified multiple times then BadStoreConfiguration +# exception will be raised. +#filesystem_store_datadirs=/var/lib/glance/images/:1 + +# A path to a JSON file that contains metadata describing the storage +# system. When show_multiple_locations is True the information in this +# file will be returned with any location that is contained in this +# store. 
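+# For instance (paths and priority numbers here are placeholders), two store
+# directories could be offered with an explicit preference order:
+# filesystem_store_datadirs=/var/lib/glance/images/:200
+# filesystem_store_datadirs=/mnt/glance-bulk/images/:100
+# The directory with priority 200 is preferred; the other is used when the
+# preferred one lacks sufficient space for an image of known size.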
+#filesystem_store_metadata_file=None + +# ============ Swift Store Options ============================= + +# Version of the authentication service to use +# Valid versions are '2' for keystone and '1' for swauth and rackspace +#swift_store_auth_version=2 + +# Address where the Swift authentication service lives +# Valid schemes are 'http://' and 'https://' +# If no scheme specified, default to 'https://' +# For swauth, use something like '127.0.0.1:8080/v1.0/' +#swift_store_auth_address=127.0.0.1:5000/v2.0/ + +# User to authenticate against the Swift authentication service +# If you use Swift authentication service, set it to 'account':'user' +# where 'account' is a Swift storage account and 'user' +# is a user in that account +#swift_store_user=jdoe:jdoe + +# Auth key for the user authenticating against the +# Swift authentication service +#swift_store_key=a86850deb2742ec3cb41518e26aa2d89 + +# Container within the account that the account should use +# for storing images in Swift +#swift_store_container=glance + +# Do we create the container if it does not exist? +#swift_store_create_container_on_put=False + +# What size, in MB, should Glance start chunking image files +# and do a large object manifest in Swift? By default, this is +# the maximum object size in Swift, which is 5GB +#swift_store_large_object_size=5120 + +# When doing a large object manifest, what size, in MB, should +# Glance write chunks to Swift? This amount of data is written +# to a temporary disk buffer during the process of chunking +# the image file, and the default is 200MB +#swift_store_large_object_chunk_size=200 + +# Whether to use ServiceNET to communicate with the Swift storage servers. +# (If you aren't RACKSPACE, leave this False!) +# +# To use ServiceNET for authentication, prefix hostname of +# `swift_store_auth_address` with 'snet-'. +# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/ +#swift_enable_snet=False + +# If set to True enables multi-tenant storage mode which causes Glance images +# to be stored in tenant specific Swift accounts. +#swift_store_multi_tenant=False + +# A list of swift ACL strings that will be applied as both read and +# write ACLs to the containers created by Glance in multi-tenant +# mode. This grants the specified tenants/users read and write access +# to all newly created image objects. The standard swift ACL string +# formats are allowed, including: +# : +# : +# *: +# Multiple ACLs can be combined using a comma separated list, for +# example: swift_store_admin_tenants = service:glance,*:admin +#swift_store_admin_tenants = + +# The region of the swift endpoint to be used for single tenant. This setting +# is only necessary if the tenant has multiple swift endpoints. +#swift_store_region = + +# If set to False, disables SSL layer compression of https swift requests. +# Setting to 'False' may improve performance for images which are already +# in a compressed format, eg qcow2. If set to True, enables SSL layer +# compression (provided it is supported by the target swift proxy). 
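+# An illustrative single-tenant Swift store setup, using the option formats
+# described above (every value below is a placeholder, not a default):
+# swift_store_auth_version=2
+# swift_store_auth_address=http://keystone.example.com:5000/v2.0/
+# swift_store_user=service:glance
+# swift_store_key=<service password>
+# swift_store_container=glance
+# swift_store_create_container_on_put=True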
+#swift_store_ssl_compression=True + +# The number of times a Swift download will be retried before the +# request fails +#swift_store_retry_get_count=0 + +# ============ S3 Store Options ============================= + +# Address where the S3 authentication service lives +# Valid schemes are 'http://' and 'https://' +# If no scheme specified, default to 'http://' +#s3_store_host=127.0.0.1:8080/v1.0/ + +# User to authenticate against the S3 authentication service +#s3_store_access_key=<20-charAWSaccesskey> + +# Auth key for the user authenticating against the +# S3 authentication service +#s3_store_secret_key=<40-charAWSsecretkey> + +# Container within the account that the account should use +# for storing images in S3. Note that S3 has a flat namespace, +# so you need a unique bucket name for your glance images. An +# easy way to do this is append your AWS access key to "glance". +# S3 buckets in AWS *must* be lowercased, so remember to lowercase +# your AWS access key if you use it in your bucket name below! +#s3_store_bucket=glance + +# Do we create the bucket if it does not exist? +#s3_store_create_bucket_on_put=False + +# When sending images to S3, the data will first be written to a +# temporary buffer on disk. By default the platform's temporary directory +# will be used. If required, an alternative directory can be specified here. +#s3_store_object_buffer_dir=/path/to/dir + +# When forming a bucket url, boto will either set the bucket name as the +# subdomain or as the first token of the path. Amazon's S3 service will +# accept it as the subdomain, but Swift's S3 middleware requires it be +# in the path. Set this to 'path' or 'subdomain' - defaults to 'subdomain'. +#s3_store_bucket_url_format=subdomain + +# ============ RBD Store Options ============================= + +# Ceph configuration file path +# If using cephx authentication, this file should +# include a reference to the right keyring +# in a client. section +#rbd_store_ceph_conf=/etc/ceph/ceph.conf + +# RADOS user to authenticate as (only applicable if using cephx) +# If , a default will be chosen based on the client. section +# in rbd_store_ceph_conf +#rbd_store_user= + +# RADOS pool in which images are stored +#rbd_store_pool=images + +# RADOS images will be chunked into objects of this size (in megabytes). +# For best performance, this should be a power of two +#rbd_store_chunk_size=8 + +# ============ Sheepdog Store Options ============================= + +#sheepdog_store_address=localhost + +#sheepdog_store_port=7000 + +# Images will be chunked into objects of this size (in megabytes). +# For best performance, this should be a power of two +#sheepdog_store_chunk_size=64 + +# ============ Cinder Store Options =============================== + +# Info to match when looking for cinder in the service catalog +# Format is : separated values of the form: +# :: (string value) +#cinder_catalog_info=volume:cinder:publicURL + +# Override service catalog lookup with template for cinder endpoint +# e.g. 
http://localhost:8776/v1/%(project_id)s (string value) +#cinder_endpoint_template= + +# Region name of this node (string value) +#os_region_name= + +# Location of ca certicates file to use for cinder client requests +# (string value) +#cinder_ca_certificates_file= + +# Number of cinderclient retries on failed http calls (integer value) +#cinder_http_retries=3 + +# Allow to perform insecure SSL requests to cinder (boolean value) +#cinder_api_insecure=False + +# ============ VMware Datastore Store Options ===================== + +# ESX/ESXi or vCenter Server target system. +# The server value can be an IP address or a DNS name +# e.g. 127.0.0.1, 127.0.0.1:443, www.vmware-infra.com +#vmware_server_host= + +# Server username (string value) +#vmware_server_username= + +# Server password (string value) +#vmware_server_password= + +# Inventory path to a datacenter (string value) +# Value optional when vmware_server_ip is an ESX/ESXi host: if specified +# should be `ha-datacenter`. +#vmware_datacenter_path= + +# Datastore associated with the datacenter (string value) +#vmware_datastore_name= + +# The number of times we retry on failures +# e.g., socket error, etc (integer value) +#vmware_api_retry_count=10 + +# The interval used for polling remote tasks +# invoked on VMware ESX/VC server in seconds (integer value) +#vmware_task_poll_interval=5 + +# Absolute path of the folder containing the images in the datastore +# (string value) +#vmware_store_image_dir=/openstack_glance + +# Allow to perform insecure SSL requests to the target system (boolean value) +#vmware_api_insecure=False + +# ============ Delayed Delete Options ============================= + +# Turn on/off delayed delete +#delayed_delete=False + +# Delayed delete time in seconds +#scrub_time=43200 + +# Directory that the scrubber will use to remind itself of what to delete +# Make sure this is also set in glance-scrubber.conf +#scrubber_datadir=/var/lib/glance/scrubber + +# =============== Quota Options ================================== + +# The maximum number of image members allowed per image +#image_member_quota=128 + +# The maximum number of image properties allowed per image +#image_property_quota=128 + +# The maximum number of tags allowed per image +#image_tag_quota=128 + +# The maximum number of locations allowed per image +#image_location_quota=10 + +# Set a system wide quota for every user. This value is the total number +# of bytes that a user can use across all storage systems. A value of +# 0 means unlimited. +#user_storage_quota=0 + +# =============== Image Cache Options ============================= + +# Base directory that the Image Cache uses +#image_cache_dir=/var/lib/glance/image-cache/ + +# =============== Manager Options ================================= + +# DEPRECATED. TO BE REMOVED IN THE JUNO RELEASE. +# Whether or not to enforce that all DB tables have charset utf8. +# If your database tables do not have charset utf8 you will +# need to convert before this option is removed. This option is +# only relevant if your database engine is MySQL. 
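+# As a worked example for the quota options above: user_storage_quota=10737418240
+# would cap each user at 10 GiB across all configured stores, while the
+# default of 0 leaves per-user usage unlimited.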
+#db_enforce_mysql_charset=True + +# =============== Database Options ================================= + +[database] +# The file name to use with SQLite (string value) +#sqlite_db=glance.sqlite + +# If True, SQLite uses synchronous mode (boolean value) +#sqlite_synchronous=True + +# The backend to use for db (string value) +# Deprecated group/name - [DEFAULT]/db_backend +#backend=sqlalchemy + +# The SQLAlchemy connection string used to connect to the +# database (string value) +# Deprecated group/name - [DEFAULT]/sql_connection +# Deprecated group/name - [DATABASE]/sql_connection +# Deprecated group/name - [sql]/connection +#connection=mysql://glance:glance@localhost/glance +connection=mysql://glance:{{ glance_db_password }}@{{ sql_host }}/glance + + +# The SQL mode to be used for MySQL sessions. This option, +# including the default, overrides any server-set SQL mode. To +# use whatever SQL mode is set by the server configuration, +# set this to no value. Example: mysql_sql_mode= (string +# value) +#mysql_sql_mode=TRADITIONAL + +# Timeout before idle sql connections are reaped (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_idle_timeout +# Deprecated group/name - [DATABASE]/sql_idle_timeout +# Deprecated group/name - [sql]/idle_timeout +idle_timeout=200 + +# Minimum number of SQL connections to keep open in a pool +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_min_pool_size +# Deprecated group/name - [DATABASE]/sql_min_pool_size +#min_pool_size=1 + +# Maximum number of SQL connections to keep open in a pool +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_pool_size +# Deprecated group/name - [DATABASE]/sql_max_pool_size +#max_pool_size= + +# Maximum db connection retries during startup. (setting -1 +# implies an infinite retry count) (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_retries +# Deprecated group/name - [DATABASE]/sql_max_retries +#max_retries=10 + +# Interval between retries of opening a sql connection +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_retry_interval +# Deprecated group/name - [DATABASE]/reconnect_interval +#retry_interval=10 + +# If set, use this value for max_overflow with sqlalchemy +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_overflow +# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow +#max_overflow= + +# Verbosity of SQL debugging information. 0=None, +# 100=Everything (integer value) +# Deprecated group/name - [DEFAULT]/sql_connection_debug +#connection_debug=0 + +# Add python stack traces to SQL as comment strings (boolean +# value) +# Deprecated group/name - [DEFAULT]/sql_connection_trace +#connection_trace=False + +# If set, use this value for pool_timeout with sqlalchemy +# (integer value) +# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout +#pool_timeout= + +# Enable the experimental use of database reconnect on +# connection lost (boolean value) +#use_db_reconnect=False + +# seconds between db connection retries (integer value) +#db_retry_interval=1 + +# Whether to increase interval between db connection retries, +# up to db_max_retry_interval (boolean value) +#db_inc_retry_interval=True + +# max seconds between db connection retries, if +# db_inc_retry_interval is enabled (integer value) +#db_max_retry_interval=10 + +# maximum db connection retries before error is raised. 
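+# Once rendered by Ansible, the connection value above becomes an ordinary
+# SQLAlchemy URL; an illustrative result (host and password are placeholders):
+# connection=mysql://glance:secret@db01.example.com/glance
+# The pool options above can then be raised for busier deployments, e.g.:
+# max_pool_size=10
+# max_overflow=20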
+# (setting -1 implies an infinite retry count) (integer value) +#db_max_retries=20 + +[keystone_authtoken] +auth_host={{ frontend_int_ip }} +auth_port=35357 +auth_protocol=http +admin_tenant_name={{ service_tenant }} +admin_user=glance +admin_password={{ glance_identity_password }} +identity_uri=http://{{ frontend_int_ip }}:35357 +auth_uri=http://{{ frontend_ext_ip }}:5000 + +[paste_deploy] +# Name of the paste configuration file that defines the available pipelines +#config_file=/usr/share/glance/glance-api-dist-paste.ini + +# Partial name of a pipeline in your paste configuration file with the +# service name removed. For example, if your paste section name is +# [pipeline:glance-api-keystone], you would configure the flavor below +# as 'keystone'. +flavor=keystone + +[store_type_location_strategy] +# The scheme list to use to get store preference order. The scheme must be +# registered by one of the stores defined by the 'known_stores' config option. +# This option will be applied when you using 'store_type' option as image +# location strategy defined by the 'location_strategy' config option. +#store_type_preference = diff --git a/tools/ansible-openstack/templates/etc/glance/glance-registry.conf b/tools/ansible-openstack/templates/etc/glance/glance-registry.conf new file mode 100644 index 0000000..0ea6eaf --- /dev/null +++ b/tools/ansible-openstack/templates/etc/glance/glance-registry.conf @@ -0,0 +1,198 @@ +[DEFAULT] +# Show more verbose log output (sets INFO log level output) +verbose={{ log_verbose }} + +# Show debugging output in logs (sets DEBUG log level output) +debug={{ log_debug }} + +# Address to bind the registry server +#bind_host=0.0.0.0 + +# Port the bind the registry server to +#bind_port=9191 + +# Log to this file. Make sure you do not set the same log file for both the API +# and registry servers! +# +# If `log_file` is omitted and `use_syslog` is false, then log messages are +# sent to stdout as a fallback. +#log_file=/var/log/glance/registry.log + +# Backlog requests when creating socket +#backlog=4096 + +# TCP_KEEPIDLE value in seconds when creating socket. +# Not supported on OS X. +#tcp_keepidle=600 + +# API to use for accessing data. Default value points to sqlalchemy +# package. +#data_api=glance.db.sqlalchemy.api + +# Enable Registry API versions individually or simultaneously +#enable_v1_registry=True +#enable_v2_registry=True + +# Limit the api to return `param_limit_max` items in a call to a container. If +# a larger `limit` query param is provided, it will be reduced to this value. +#api_limit_max=1000 + +# If a `limit` query param is not provided in an api request, it will +# default to `limit_param_default` +#limit_param_default=25 + +# Role used to identify an authenticated user as administrator +#admin_role=admin + +# Whether to automatically create the database tables. +# Default: False +#db_auto_create=False + +# Enable DEBUG log messages from sqlalchemy which prints every database +# query and response. +# Default: False +#sqlalchemy_debug=True + +# ================= Syslog Options ============================ + +# Send logs to syslog (/dev/log) instead of to file specified +# by `log_file` +#use_syslog=False + +# Facility to use. If unset defaults to LOG_USER. 
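+# For example, to send registry log output to the local syslog daemon instead
+# of a log file (values shown are illustrative):
+# use_syslog=True
+# syslog_log_facility=LOG_LOCAL1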
+#syslog_log_facility=LOG_LOCAL1 + +# ================= SSL Options =============================== + +# Certificate file to use when starting registry server securely +#cert_file=/path/to/certfile + +# Private key file to use when starting registry server securely +#key_file=/path/to/keyfile + +# CA certificate file to use to verify connecting clients +#ca_file=/path/to/cafile + +# ================= Database Options ========================== + +[database] +# The file name to use with SQLite (string value) +#sqlite_db=glance.sqlite + +# If True, SQLite uses synchronous mode (boolean value) +#sqlite_synchronous=True + +# The backend to use for db (string value) +# Deprecated group/name - [DEFAULT]/db_backend +#backend=sqlalchemy + +# The SQLAlchemy connection string used to connect to the +# database (string value) +# Deprecated group/name - [DEFAULT]/sql_connection +# Deprecated group/name - [DATABASE]/sql_connection +# Deprecated group/name - [sql]/connection +#connection=mysql://glance:glance@localhost/glance +connection=mysql://glance:{{ glance_db_password }}@{{ sql_host }}/glance + + +# The SQL mode to be used for MySQL sessions. This option, +# including the default, overrides any server-set SQL mode. To +# use whatever SQL mode is set by the server configuration, +# set this to no value. Example: mysql_sql_mode= (string +# value) +#mysql_sql_mode=TRADITIONAL + +# Timeout before idle sql connections are reaped (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_idle_timeout +# Deprecated group/name - [DATABASE]/sql_idle_timeout +# Deprecated group/name - [sql]/idle_timeout +#idle_timeout=3600 + +# Minimum number of SQL connections to keep open in a pool +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_min_pool_size +# Deprecated group/name - [DATABASE]/sql_min_pool_size +#min_pool_size=1 + +# Maximum number of SQL connections to keep open in a pool +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_pool_size +# Deprecated group/name - [DATABASE]/sql_max_pool_size +#max_pool_size= + +# Maximum db connection retries during startup. (setting -1 +# implies an infinite retry count) (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_retries +# Deprecated group/name - [DATABASE]/sql_max_retries +#max_retries=10 + +# Interval between retries of opening a sql connection +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_retry_interval +# Deprecated group/name - [DATABASE]/reconnect_interval +#retry_interval=10 + +# If set, use this value for max_overflow with sqlalchemy +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_overflow +# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow +#max_overflow= + +# Verbosity of SQL debugging information. 
0=None, +# 100=Everything (integer value) +# Deprecated group/name - [DEFAULT]/sql_connection_debug +#connection_debug=0 + +# Add python stack traces to SQL as comment strings (boolean +# value) +# Deprecated group/name - [DEFAULT]/sql_connection_trace +#connection_trace=False + +# If set, use this value for pool_timeout with sqlalchemy +# (integer value) +# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout +#pool_timeout= + +# Enable the experimental use of database reconnect on +# connection lost (boolean value) +#use_db_reconnect=False + +# seconds between db connection retries (integer value) +#db_retry_interval=1 + +# Whether to increase interval between db connection retries, +# up to db_max_retry_interval (boolean value) +#db_inc_retry_interval=True + +# max seconds between db connection retries, if +# db_inc_retry_interval is enabled (integer value) +#db_max_retry_interval=10 + +# maximum db connection retries before error is raised. +# (setting -1 implies an infinite retry count) (integer value) +#db_max_retries=20 + + +[keystone_authtoken] +auth_host={{ frontend_int_ip }} +auth_port=35357 +auth_protocol=http +admin_tenant_name={{ service_tenant }} +admin_user=glance +admin_password={{ glance_identity_password }} +identity_uri=http://{{ frontend_int_ip }}:35357 +auth_uri=http://{{ frontend_ext_ip }}:5000 + + + + +[paste_deploy] +# Name of the paste configuration file that defines the available pipelines +#config_file=/usr/share/glance/glance-registry-dist-paste.ini + +# Partial name of a pipeline in your paste configuration file with the +# service name removed. For example, if your paste section name is +# [pipeline:glance-registry-keystone], you would configure the flavor below +# as 'keystone'. +flavor=keystone diff --git a/tools/ansible-openstack/templates/etc/init.d/neutron-linuxbridge-agent b/tools/ansible-openstack/templates/etc/init.d/neutron-linuxbridge-agent new file mode 100644 index 0000000..9ac9566 --- /dev/null +++ b/tools/ansible-openstack/templates/etc/init.d/neutron-linuxbridge-agent @@ -0,0 +1,100 @@ +#!/bin/bash +# +# neutron-linuxbridge-agent OpenStack linuxbridge plugin +# +# chkconfig: - 98 02 +# description: Support VLANs using Linux bridging +### END INIT INFO + +. /etc/rc.d/init.d/functions + +proj=neutron +plugin=linuxbridge-agent +prog=$proj-$plugin +exec="/usr/bin/$prog" +configs=( + "/usr/share/$proj/$proj-dist.conf" \ + "/etc/$proj/$proj.conf" \ + "/etc/$proj/plugin.ini" \ +) +pidfile="/var/run/$proj/$prog.pid" + +[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog + +lockfile=/var/lock/subsys/$prog + +start() { + [ -x $exec ] || exit 5 + for config in ${configs[@]}; do + [ -f $config ] || exit 6 + done + echo -n $"Starting $prog: " + daemon --user neutron --pidfile $pidfile "$exec --log-file /var/log/$proj/$plugin.log ${configs[@]/#/--config-file } &>/dev/null & echo \$! > $pidfile" + retval=$? + echo + [ $retval -eq 0 ] && touch $lockfile + return $retval +} + +stop() { + echo -n $"Stopping $prog: " + killproc -p $pidfile $prog + retval=$? 
+ echo + [ $retval -eq 0 ] && rm -f $lockfile + return $retval +} + +restart() { + stop + start +} + +reload() { + restart +} + +force_reload() { + restart +} + +rh_status() { + status -p $pidfile $prog +} + +rh_status_q() { + rh_status >/dev/null 2>&1 +} + + +case "$1" in + start) + rh_status_q && exit 0 + $1 + ;; + stop) + rh_status_q || exit 0 + $1 + ;; + restart) + $1 + ;; + reload) + rh_status_q || exit 7 + $1 + ;; + force-reload) + force_reload + ;; + status) + rh_status + ;; + condrestart|try-restart) + rh_status_q || exit 0 + restart + ;; + *) + echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}" + exit 2 +esac +exit $? diff --git a/tools/ansible-openstack/templates/etc/keystone/keystone.conf b/tools/ansible-openstack/templates/etc/keystone/keystone.conf new file mode 100644 index 0000000..49dec57 --- /dev/null +++ b/tools/ansible-openstack/templates/etc/keystone/keystone.conf @@ -0,0 +1,1338 @@ +[DEFAULT] + +# +# Options defined in keystone +# + +# A "shared secret" that can be used to bootstrap Keystone. +# This "token" does not represent a user, and carries no +# explicit authorization. To disable in production (highly +# recommended), remove AdminTokenAuthMiddleware from your +# paste application pipelines (for example, in keystone- +# paste.ini). (string value) +#admin_token=ADMIN +admin_token = {{ admin_token }} + + +# The IP Address of the network interface to for the public +# service to listen on. (string value) +# Deprecated group/name - [DEFAULT]/bind_host +#public_bind_host=0.0.0.0 + +# The IP Address of the network interface to for the admin +# service to listen on. (string value) +# Deprecated group/name - [DEFAULT]/bind_host +#admin_bind_host=0.0.0.0 + +# The port which the OpenStack Compute service listens on. +# (integer value) +#compute_port=8774 + +# The port number which the admin service listens on. (integer +# value) +#admin_port=35357 + +# The port number which the public service listens on. +# (integer value) +#public_port=5000 + +# The base public endpoint URL for keystone that are +# advertised to clients (NOTE: this does NOT affect how +# keystone listens for connections) (string value). +# Defaults to the base host URL of the request. Eg a +# request to http://server:5000/v2.0/users will +# default to http://server:5000. You should only need +# to set this value if the base URL contains a path +# (eg /prefix/v2.0) or the endpoint should be found on +# a different server. +#public_endpoint=http://localhost:%(public_port)s/ + + +# The base admin endpoint URL for keystone that are advertised +# to clients (NOTE: this does NOT affect how keystone listens +# for connections) (string value). +# Defaults to the base host URL of the request. Eg a +# request to http://server:35357/v2.0/users will +# default to http://server:35357. You should only need +# to set this value if the base URL contains a path +# (eg /prefix/v2.0) or the endpoint should be found on +# a different server. +#admin_endpoint=http://localhost:%(admin_port)s/ + +# enforced by optional sizelimit middleware +# (keystone.middleware:RequestBodySizeLimiter). (integer +# value) +#max_request_body_size=114688 + +# limit the sizes of user & tenant ID/names. (integer value) +#max_param_size=64 + +# similar to max_param_size, but provides an exception for +# token values. 
(integer value) +#max_token_size=8192 + +# During a SQL upgrade member_role_id will be used to create a +# new role that will replace records in the +# user_tenant_membership table with explicit role grants. +# After migration, the member_role_id will be used in the API +# add_user_to_project. (string value) +#member_role_id=9fe2ff9ee4384b1894a90878d3e92bab + +# During a SQL upgrade member_role_id will be used to create a +# new role that will replace records in the +# user_tenant_membership table with explicit role grants. +# After migration, member_role_name will be ignored. (string +# value) +#member_role_name=_member_ + +# The value passed as the keyword "rounds" to passlib encrypt +# method. (integer value) +#crypt_strength=40000 + +# Set this to True if you want to enable TCP_KEEPALIVE on +# server sockets i.e. sockets used by the keystone wsgi server +# for client connections. (boolean value) +#tcp_keepalive=false + +# Sets the value of TCP_KEEPIDLE in seconds for each server +# socket. Only applies if tcp_keepalive is True. Not supported +# on OS X. (integer value) +#tcp_keepidle=600 + +# The maximum number of entities that will be returned in a +# collection can be set with list_limit, with no limit set by +# default. This global limit may be then overridden for a +# specific driver, by specifying a list_limit in the +# appropriate section (e.g. [assignment]). (integer value) +#list_limit= + +# Set this to false if you want to enable the ability for +# user, group and project entities to be moved between domains +# by updating their domain_id. Allowing such movement is not +# recommended if the scope of a domain admin is being +# restricted by use of an appropriate policy file (see +# policy.v3cloudsample as an example). (boolean value) +#domain_id_immutable=true + + +# +# Options defined in oslo.messaging +# + +# Use durable queues in amqp. (boolean value) +# Deprecated group/name - [DEFAULT]/rabbit_durable_queues +#amqp_durable_queues=false + +# Auto-delete queues in amqp. (boolean value) +#amqp_auto_delete=false + +# Size of RPC connection pool. (integer value) +#rpc_conn_pool_size=30 + +# Modules of exceptions that are permitted to be recreated +# upon receiving exception data from an rpc call. (list value) +#allowed_rpc_exception_modules=oslo.messaging.exceptions,nova.exception,cinder.exception,exceptions + +# Qpid broker hostname. (string value) +#qpid_hostname=localhost + +# Qpid broker port. (integer value) +#qpid_port=5672 + +# Qpid HA cluster host:port pairs. (list value) +#qpid_hosts=$qpid_hostname:$qpid_port + +# Username for Qpid connection. (string value) +#qpid_username= + +# Password for Qpid connection. (string value) +#qpid_password= + +# Space separated list of SASL mechanisms to use for auth. +# (string value) +#qpid_sasl_mechanisms= + +# Seconds between connection keepalive heartbeats. (integer +# value) +#qpid_heartbeat=60 + +# Transport to use, either 'tcp' or 'ssl'. (string value) +#qpid_protocol=tcp + +# Whether to disable the Nagle algorithm. (boolean value) +#qpid_tcp_nodelay=true + +# The qpid topology version to use. Version 1 is what was +# originally used by impl_qpid. Version 2 includes some +# backwards-incompatible changes that allow broker federation +# to work. Users should update to version 2 when they are +# able to take everything down, as it requires a clean break. +# (integer value) +#qpid_topology_version=1 + +# SSL version to use (valid only if SSL enabled). valid values +# are TLSv1, SSLv23 and SSLv3. 
SSLv2 may be available on some +# distributions. (string value) +#kombu_ssl_version= + +# SSL key file (valid only if SSL enabled). (string value) +#kombu_ssl_keyfile= + +# SSL cert file (valid only if SSL enabled). (string value) +#kombu_ssl_certfile= + +# SSL certification authority file (valid only if SSL +# enabled). (string value) +#kombu_ssl_ca_certs= + +# How long to wait before reconnecting in response to an AMQP +# consumer cancel notification. (floating point value) +#kombu_reconnect_delay=1.0 + +# The RabbitMQ broker address where a single node is used. +# (string value) +#rabbit_host=localhost + +# The RabbitMQ broker port where a single node is used. +# (integer value) +#rabbit_port=5672 + +# RabbitMQ HA cluster host:port pairs. (list value) +#rabbit_hosts=$rabbit_host:$rabbit_port + +# Connect over SSL for RabbitMQ. (boolean value) +#rabbit_use_ssl=false + +# The RabbitMQ userid. (string value) +#rabbit_userid=guest + +# The RabbitMQ password. (string value) +#rabbit_password=guest + +# the RabbitMQ login method (string value) +#rabbit_login_method=AMQPLAIN + +# The RabbitMQ virtual host. (string value) +#rabbit_virtual_host=/ + +# How frequently to retry connecting with RabbitMQ. (integer +# value) +#rabbit_retry_interval=1 + +# How long to backoff for between retries when connecting to +# RabbitMQ. (integer value) +#rabbit_retry_backoff=2 + +# Maximum number of RabbitMQ connection retries. Default is 0 +# (infinite retry count). (integer value) +#rabbit_max_retries=0 + +# Use HA queues in RabbitMQ (x-ha-policy: all). If you change +# this option, you must wipe the RabbitMQ database. (boolean +# value) +#rabbit_ha_queues=false + +# If passed, use a fake RabbitMQ provider. (boolean value) +#fake_rabbit=false + +# ZeroMQ bind address. Should be a wildcard (*), an ethernet +# interface, or IP. The "host" option should point or resolve +# to this address. (string value) +#rpc_zmq_bind_address=* + +# MatchMaker driver. (string value) +#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost + +# ZeroMQ receiver listening port. (integer value) +#rpc_zmq_port=9501 + +# Number of ZeroMQ contexts, defaults to 1. (integer value) +#rpc_zmq_contexts=1 + +# Maximum number of ingress messages to locally buffer per +# topic. Default is unlimited. (integer value) +#rpc_zmq_topic_backlog= + +# Directory for holding IPC sockets. (string value) +#rpc_zmq_ipc_dir=/var/run/openstack + +# Name of this node. Must be a valid hostname, FQDN, or IP +# address. Must match "host" option, if running Nova. (string +# value) +#rpc_zmq_host=keystone + +# Seconds to wait before a cast expires (TTL). Only supported +# by impl_zmq. (integer value) +#rpc_cast_timeout=30 + +# Heartbeat frequency. (integer value) +#matchmaker_heartbeat_freq=300 + +# Heartbeat time-to-live. (integer value) +#matchmaker_heartbeat_ttl=600 + +# Host to locate redis. (string value) +#host=127.0.0.1 + +# Use this port to connect to redis host. (integer value) +#port=6379 + +# Password for Redis server (optional). (string value) +#password= + +# Size of RPC greenthread pool. (integer value) +#rpc_thread_pool_size=64 + +# Driver or drivers to handle sending notifications. (multi +# valued) +#notification_driver= + +# AMQP topic used for OpenStack notifications. (list value) +# Deprecated group/name - [rpc_notifier2]/topics +#notification_topics=notifications + +# Seconds to wait for a response from a call. 
(integer value) +#rpc_response_timeout=60 + +# A URL representing the messaging driver to use and its full +# configuration. If not set, we fall back to the rpc_backend +# option and driver specific configuration. (string value) +#transport_url= + +# The messaging driver to use, defaults to rabbit. Other +# drivers include qpid and zmq. (string value) +#rpc_backend=rabbit + +# The default exchange under which topics are scoped. May be +# overridden by an exchange name specified in the +# transport_url option. (string value) +#control_exchange=openstack + + +# +# Options defined in keystone.notifications +# + +# Default publisher_id for outgoing notifications (string +# value) +#default_publisher_id= + + +# +# Options defined in keystone.middleware.ec2_token +# + +# URL to get token from ec2 request. (string value) +#keystone_ec2_url=http://localhost:5000/v2.0/ec2tokens + +# Required if EC2 server requires client certificate. (string +# value) +#keystone_ec2_keyfile= + +# Client certificate key filename. Required if EC2 server +# requires client certificate. (string value) +#keystone_ec2_certfile= + +# A PEM encoded certificate authority to use when verifying +# HTTPS connections. Defaults to the system CAs. (string +# value) +#keystone_ec2_cafile= + +# Disable SSL certificate verification. (boolean value) +#keystone_ec2_insecure=false + + +# +# Options defined in keystone.openstack.common.eventlet_backdoor +# + +# Enable eventlet backdoor. Acceptable values are 0, , +# and :, where 0 results in listening on a random +# tcp port number; results in listening on the +# specified port number (and not enabling backdoor if that +# port is in use); and : results in listening on +# the smallest unused port number within the specified range +# of port numbers. The chosen port is displayed in the +# service's log file. (string value) +#backdoor_port= + + +# +# Options defined in keystone.openstack.common.lockutils +# + +# Whether to disable inter-process locks (boolean value) +#disable_process_locking=false + +# Directory to use for lock files. (string value) +#lock_path= + + +# +# Options defined in keystone.openstack.common.log +# + +# Print debugging output (set logging level to DEBUG instead +# of default WARNING level). (boolean value) +#debug=false +debug = {{ log_debug }} + +# Print more verbose output (set logging level to INFO instead +# of default WARNING level). 
(boolean value) +#verbose=false +verbose = {{ log_verbose }} + +# Log output to standard error (boolean value) +#use_stderr=false + +# Format string to use for log messages with context (string +# value) +#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s + +# Format string to use for log messages without context +# (string value) +#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s + +# Data to append to log format when level is DEBUG (string +# value) +#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d + +# Prefix each line of exception output with this format +# (string value) +#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s + +# List of logger=LEVEL pairs (list value) +#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN + +# Publish error events (boolean value) +#publish_errors=false + +# Make deprecations fatal (boolean value) +#fatal_deprecations=false + +# If an instance is passed with the log message, format it +# like this (string value) +#instance_format="[instance: %(uuid)s] " + +# If an instance UUID is passed with the log message, format +# it like this (string value) +#instance_uuid_format="[instance: %(uuid)s] " + +# The name of logging configuration file. It does not disable +# existing loggers, but just appends specified logging +# configuration to any other existing logging options. Please +# see the Python logging module documentation for details on +# logging configuration files. (string value) +# Deprecated group/name - [DEFAULT]/log_config +#log_config_append= + +# DEPRECATED. A logging.Formatter log message format string +# which may use any of the available logging.LogRecord +# attributes. This option is deprecated. Please use +# logging_context_format_string and +# logging_default_format_string instead. (string value) +#log_format= + +# Format string for %%(asctime)s in log records. Default: +# %(default)s (string value) +#log_date_format=%Y-%m-%d %H:%M:%S + +# (Optional) Name of log file to output to. (string value) +# If not set here, logging will go to /var/log/keystone/keystone.log, +# default from keystone-dist.conf. +# Deprecated group/name - [DEFAULT]/logfile +#log_file=/var/log/keystone/keystone.log + +# (Optional) The base directory used for relative --log-file +# paths (string value) +# Deprecated group/name - [DEFAULT]/logdir +#log_dir= + +# Use syslog for logging. Existing syslog format is DEPRECATED +# during I, and then will be changed in J to honor RFC5424 +# (boolean value) +#use_syslog=false + +# (Optional) Use syslog rfc5424 format for logging. If +# enabled, will add APP-NAME (RFC5424) before the MSG part of +# the syslog message. The old format without APP-NAME is +# deprecated in I, and will be removed in J. (boolean value) +#use_syslog_rfc_format=false + +# Syslog facility to receive log lines (string value) +#syslog_log_facility=LOG_USER + + +# +# Options defined in keystone.openstack.common.policy +# + +# JSON file containing policy (string value) +#policy_file=policy.json + +# Rule enforced when requested rule is not found (string +# value) +#policy_default_rule=default + + +[assignment] + +# +# Options defined in keystone +# + +# Keystone Assignment backend driver. (string value) +#driver= + +# Toggle for assignment caching. 
This has no effect unless +# global caching is enabled. (boolean value) +#caching=true + +# TTL (in seconds) to cache assignment data. This has no +# effect unless global caching is enabled. (integer value) +#cache_time= + +# Maximum number of entities that will be returned in an +# assignment collection. (integer value) +#list_limit= + + +[auth] + +# +# Options defined in keystone +# + +# Default auth methods. (list value) +#methods=external,password,token + +# The password auth plugin module. (string value) +#password=keystone.auth.plugins.password.Password + +# The token auth plugin module. (string value) +#token=keystone.auth.plugins.token.Token + +# The external (REMOTE_USER) auth plugin module. (string +# value) +#external=keystone.auth.plugins.external.DefaultDomain + + +[cache] + +# +# Options defined in keystone +# + +# Prefix for building the configuration dictionary for the +# cache region. This should not need to be changed unless +# there is another dogpile.cache region with the same +# configuration name. (string value) +#config_prefix=cache.keystone + +# Default TTL, in seconds, for any cached item in the +# dogpile.cache region. This applies to any cached method that +# doesn't have an explicit cache expiration time defined for +# it. (integer value) +#expiration_time=600 + +# Dogpile.cache backend module. It is recommended that +# Memcache (dogpile.cache.memcache) or Redis +# (dogpile.cache.redis) be used in production deployments. +# Small workloads (single process) like devstack can use the +# dogpile.cache.memory backend. (string value) +#backend=keystone.common.cache.noop + +# Use a key-mangling function (sha1) to ensure fixed length +# cache-keys. This is toggle-able for debugging purposes, it +# is highly recommended to always leave this set to True. +# (boolean value) +#use_key_mangler=true + +# Arguments supplied to the backend module. Specify this +# option once per argument to be passed to the dogpile.cache +# backend. Example format: ":". (multi valued) +#backend_argument= + +# Proxy Classes to import that will affect the way the +# dogpile.cache backend functions. See the dogpile.cache +# documentation on changing-backend-behavior. Comma delimited +# list e.g. my.dogpile.proxy.Class, my.dogpile.proxyClass2. +# (list value) +#proxies= + +# Global toggle for all caching using the should_cache_fn +# mechanism. (boolean value) +#enabled=false + +# Extra debugging from the cache backend (cache keys, +# get/set/delete/etc calls) This is only really useful if you +# need to see the specific cache-backend get/set/delete calls +# with the keys/values. Typically this should be left set to +# False. (boolean value) +#debug_cache_backend=false + + +[catalog] + +# +# Options defined in keystone +# + +# Catalog template file name for use with the template catalog +# backend. (string value) +#template_file=/etc/keystone/default_catalog.templates + +# Keystone catalog backend driver. (string value) +#driver=keystone.catalog.backends.sql.Catalog + +# Maximum number of entities that will be returned in a +# catalog collection. (integer value) +#list_limit= + + +[credential] + +# +# Options defined in keystone +# + +# Keystone Credential backend driver. 
(string value) +#driver=keystone.credential.backends.sql.Credential + + +[database] + +# +# Options defined in keystone.openstack.common.db.options +# + +# The file name to use with SQLite (string value) +#sqlite_db=keystone.sqlite + +# If True, SQLite uses synchronous mode (boolean value) +#sqlite_synchronous=true + +# The backend to use for db (string value) +# Deprecated group/name - [DEFAULT]/db_backend +#backend=sqlalchemy + +# The SQLAlchemy connection string used to connect to the +# database (string value) +# Deprecated group/name - [DEFAULT]/sql_connection +# Deprecated group/name - [DATABASE]/sql_connection +# Deprecated group/name - [sql]/connection +#connection=mysql://keystone:keystone@localhost/keystone +connection=mysql://keystone:{{ keystone_db_password }}@{{ sql_host }}/keystone + +# The SQL mode to be used for MySQL sessions. This option, +# including the default, overrides any server-set SQL mode. To +# use whatever SQL mode is set by the server configuration, +# set this to no value. Example: mysql_sql_mode= (string +# value) +#mysql_sql_mode=TRADITIONAL + +# Timeout before idle sql connections are reaped (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_idle_timeout +# Deprecated group/name - [DATABASE]/sql_idle_timeout +# Deprecated group/name - [sql]/idle_timeout +#idle_timeout=3600 +idle_timeout=200 + +# Minimum number of SQL connections to keep open in a pool +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_min_pool_size +# Deprecated group/name - [DATABASE]/sql_min_pool_size +#min_pool_size=1 + +# Maximum number of SQL connections to keep open in a pool +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_pool_size +# Deprecated group/name - [DATABASE]/sql_max_pool_size +#max_pool_size= + +# Maximum db connection retries during startup. (setting -1 +# implies an infinite retry count) (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_retries +# Deprecated group/name - [DATABASE]/sql_max_retries +#max_retries=10 + +# Interval between retries of opening a sql connection +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_retry_interval +# Deprecated group/name - [DATABASE]/reconnect_interval +#retry_interval=10 + +# If set, use this value for max_overflow with sqlalchemy +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_overflow +# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow +#max_overflow= + +# Verbosity of SQL debugging information. 0=None, +# 100=Everything (integer value) +# Deprecated group/name - [DEFAULT]/sql_connection_debug +#connection_debug=0 + +# Add python stack traces to SQL as comment strings (boolean +# value) +# Deprecated group/name - [DEFAULT]/sql_connection_trace +#connection_trace=false + +# If set, use this value for pool_timeout with sqlalchemy +# (integer value) +# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout +#pool_timeout= + +# Enable the experimental use of database reconnect on +# connection lost (boolean value) +#use_db_reconnect=false + +# seconds between db connection retries (integer value) +#db_retry_interval=1 + +# Whether to increase interval between db connection retries, +# up to db_max_retry_interval (boolean value) +#db_inc_retry_interval=true + +# max seconds between db connection retries, if +# db_inc_retry_interval is enabled (integer value) +#db_max_retry_interval=10 + +# maximum db connection retries before error is raised. 
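+# Illustrative rendering only; the inventory values below are hypothetical,
+# not upstream defaults. With keystone_db_password=s3cr3t and
+# sql_host=192.0.2.10, the templated connection line above would render as:
+# connection=mysql://keystone:s3cr3t@192.0.2.10/keystone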
+# (setting -1 implies an infinite retry count) (integer value) +#db_max_retries=20 + + +[ec2] + +# +# Options defined in keystone +# + +# Keystone EC2Credential backend driver. (string value) +#driver=keystone.contrib.ec2.backends.sql.Ec2 + + +[endpoint_filter] + +# +# Options defined in keystone +# + +# Keystone Endpoint Filter backend driver (string value) +#driver=keystone.contrib.endpoint_filter.backends.sql.EndpointFilter + +# Toggle to return all active endpoints if no filter exists. +# (boolean value) +#return_all_endpoints_if_no_filter=true + + +[federation] + +# +# Options defined in keystone +# + +# Keystone Federation backend driver. (string value) +#driver=keystone.contrib.federation.backends.sql.Federation + +# Value to be used when filtering assertion parameters from +# the environment. (string value) +#assertion_prefix= + + +[identity] + +# +# Options defined in keystone +# + +# This references the domain to use for all Identity API v2 +# requests (which are not aware of domains). A domain with +# this ID will be created for you by keystone-manage db_sync +# in migration 008. The domain referenced by this ID cannot +# be deleted on the v3 API, to prevent accidentally breaking +# the v2 API. There is nothing special about this domain, +# other than the fact that it must exist to order to maintain +# support for your v2 clients. (string value) +#default_domain_id=default + +# A subset (or all) of domains can have their own identity +# driver, each with their own partial configuration file in a +# domain configuration directory. Only values specific to the +# domain need to be placed in the domain specific +# configuration file. This feature is disabled by default; set +# to True to enable. (boolean value) +#domain_specific_drivers_enabled=false + +# Path for Keystone to locate the domain specificidentity +# configuration files if domain_specific_drivers_enabled is +# set to true. (string value) +#domain_config_dir=/etc/keystone/domains + +# Keystone Identity backend driver. (string value) +#driver=keystone.identity.backends.sql.Identity + +# Maximum supported length for user passwords; decrease to +# improve performance. (integer value) +#max_password_length=4096 + +# Maximum number of entities that will be returned in an +# identity collection. (integer value) +#list_limit= + + +[kvs] + +# +# Options defined in keystone +# + +# Extra dogpile.cache backend modules to register with the +# dogpile.cache library. (list value) +#backends= + +# Prefix for building the configuration dictionary for the KVS +# region. This should not need to be changed unless there is +# another dogpile.cache region with the same configuration +# name. (string value) +#config_prefix=keystone.kvs + +# Toggle to disable using a key-mangling function to ensure +# fixed length keys. This is toggle-able for debugging +# purposes, it is highly recommended to always leave this set +# to True. (boolean value) +#enable_key_mangler=true + +# Default lock timeout for distributed locking. (integer +# value) +#default_lock_timeout=5 + + +[ldap] + +# +# Options defined in keystone +# + +# URL for connecting to the LDAP server. (string value) +#url=ldap://localhost + +# User BindDN to query the LDAP server. (string value) +#user= + +# Password for the BindDN to query the LDAP server. (string +# value) +#password= + +# LDAP server suffix (string value) +#suffix=cn=example,cn=com + +# If true, will add a dummy member to groups. This is required +# if the objectclass for groups requires the "member" +# attribute. 
(boolean value) +#use_dumb_member=false + +# DN of the "dummy member" to use when "use_dumb_member" is +# enabled. (string value) +#dumb_member=cn=dumb,dc=nonexistent + +# allow deleting subtrees. (boolean value) +#allow_subtree_delete=false + +# The LDAP scope for queries, this can be either "one" +# (onelevel/singleLevel) or "sub" (subtree/wholeSubtree). +# (string value) +#query_scope=one + +# Maximum results per page; a value of zero ("0") disables +# paging. (integer value) +#page_size=0 + +# The LDAP dereferencing option for queries. This can be +# either "never", "searching", "always", "finding" or +# "default". The "default" option falls back to using default +# dereferencing configured by your ldap.conf. (string value) +#alias_dereferencing=default + +# Override the system's default referral chasing behavior for +# queries. (boolean value) +#chase_referrals= + +# Search base for users. (string value) +#user_tree_dn= + +# LDAP search filter for users. (string value) +#user_filter= + +# LDAP objectClass for users. (string value) +#user_objectclass=inetOrgPerson + +# LDAP attribute mapped to user id. (string value) +#user_id_attribute=cn + +# LDAP attribute mapped to user name. (string value) +#user_name_attribute=sn + +# LDAP attribute mapped to user email. (string value) +#user_mail_attribute=email + +# LDAP attribute mapped to password. (string value) +#user_pass_attribute=userPassword + +# LDAP attribute mapped to user enabled flag. (string value) +#user_enabled_attribute=enabled + +# Bitmask integer to indicate the bit that the enabled value +# is stored in if the LDAP server represents "enabled" as a +# bit on an integer rather than a boolean. A value of "0" +# indicates the mask is not used. If this is not set to "0" +# the typical value is "2". This is typically used when +# "user_enabled_attribute = userAccountControl". (integer +# value) +#user_enabled_mask=0 + +# Default value to enable users. This should match an +# appropriate int value if the LDAP server uses non-boolean +# (bitmask) values to indicate if a user is enabled or +# disabled. If this is not set to "True"the typical value is +# "512". This is typically used when "user_enabled_attribute = +# userAccountControl". (string value) +#user_enabled_default=True + +# List of attributes stripped off the user on update. (list +# value) +#user_attribute_ignore=default_project_id,tenants + +# LDAP attribute mapped to default_project_id for users. +# (string value) +#user_default_project_id_attribute= + +# Allow user creation in LDAP backend. (boolean value) +#user_allow_create=true + +# Allow user updates in LDAP backend. (boolean value) +#user_allow_update=true + +# Allow user deletion in LDAP backend. (boolean value) +#user_allow_delete=true + +# If True, Keystone uses an alternative method to determine if +# a user is enabled or not by checking if they are a member of +# the "user_enabled_emulation_dn" group. (boolean value) +#user_enabled_emulation=false + +# DN of the group entry to hold enabled users when using +# enabled emulation. (string value) +#user_enabled_emulation_dn= + +# List of additional LDAP attributes used for mapping +# Additional attribute mappings for users. Attribute mapping +# format is :, where ldap_attr is the +# attribute in the LDAP entry and user_attr is the Identity +# API attribute. (list value) +#user_additional_attribute_mapping= + +# Search base for projects (string value) +#tenant_tree_dn= + +# LDAP search filter for projects. 
(string value) +#tenant_filter= + +# LDAP objectClass for projects. (string value) +#tenant_objectclass=groupOfNames + +# LDAP attribute mapped to project id. (string value) +#tenant_id_attribute=cn + +# LDAP attribute mapped to project membership for user. +# (string value) +#tenant_member_attribute=member + +# LDAP attribute mapped to project name. (string value) +#tenant_name_attribute=ou + +# LDAP attribute mapped to project description. (string value) +#tenant_desc_attribute=description + +# LDAP attribute mapped to project enabled. (string value) +#tenant_enabled_attribute=enabled + +# LDAP attribute mapped to project domain_id. (string value) +#tenant_domain_id_attribute=businessCategory + +# List of attributes stripped off the project on update. (list +# value) +#tenant_attribute_ignore= + +# Allow tenant creation in LDAP backend. (boolean value) +#tenant_allow_create=true + +# Allow tenant update in LDAP backend. (boolean value) +#tenant_allow_update=true + +# Allow tenant deletion in LDAP backend. (boolean value) +#tenant_allow_delete=true + +# If True, Keystone uses an alternative method to determine if +# a project is enabled or not by checking if they are a member +# of the "tenant_enabled_emulation_dn" group. (boolean value) +#tenant_enabled_emulation=false + +# DN of the group entry to hold enabled projects when using +# enabled emulation. (string value) +#tenant_enabled_emulation_dn= + +# Additional attribute mappings for projects. Attribute +# mapping format is :, where ldap_attr +# is the attribute in the LDAP entry and user_attr is the +# Identity API attribute. (list value) +#tenant_additional_attribute_mapping= + +# Search base for roles. (string value) +#role_tree_dn= + +# LDAP search filter for roles. (string value) +#role_filter= + +# LDAP objectClass for roles. (string value) +#role_objectclass=organizationalRole + +# LDAP attribute mapped to role id. (string value) +#role_id_attribute=cn + +# LDAP attribute mapped to role name. (string value) +#role_name_attribute=ou + +# LDAP attribute mapped to role membership. (string value) +#role_member_attribute=roleOccupant + +# List of attributes stripped off the role on update. (list +# value) +#role_attribute_ignore= + +# Allow role creation in LDAP backend. (boolean value) +#role_allow_create=true + +# Allow role update in LDAP backend. (boolean value) +#role_allow_update=true + +# Allow role deletion in LDAP backend. (boolean value) +#role_allow_delete=true + +# Additional attribute mappings for roles. Attribute mapping +# format is :, where ldap_attr is the +# attribute in the LDAP entry and user_attr is the Identity +# API attribute. (list value) +#role_additional_attribute_mapping= + +# Search base for groups. (string value) +#group_tree_dn= + +# LDAP search filter for groups. (string value) +#group_filter= + +# LDAP objectClass for groups. (string value) +#group_objectclass=groupOfNames + +# LDAP attribute mapped to group id. (string value) +#group_id_attribute=cn + +# LDAP attribute mapped to group name. (string value) +#group_name_attribute=ou + +# LDAP attribute mapped to show group membership. (string +# value) +#group_member_attribute=member + +# LDAP attribute mapped to group description. (string value) +#group_desc_attribute=description + +# List of attributes stripped off the group on update. (list +# value) +#group_attribute_ignore= + +# Allow group creation in LDAP backend. (boolean value) +#group_allow_create=true + +# Allow group update in LDAP backend. 
(boolean value) +#group_allow_update=true + +# Allow group deletion in LDAP backend. (boolean value) +#group_allow_delete=true + +# Additional attribute mappings for groups. Attribute mapping +# format is :, where ldap_attr is the +# attribute in the LDAP entry and user_attr is the Identity +# API attribute. (list value) +#group_additional_attribute_mapping= + +# CA certificate file path for communicating with LDAP +# servers. (string value) +#tls_cacertfile= + +# CA certificate directory path for communicating with LDAP +# servers. (string value) +#tls_cacertdir= + +# Enable TLS for communicating with LDAP servers. (boolean +# value) +#use_tls=false + +# valid options for tls_req_cert are demand, never, and allow. +# (string value) +#tls_req_cert=demand + + +[matchmaker_ring] + +# +# Options defined in oslo.messaging +# + +# Matchmaker ring file (JSON). (string value) +# Deprecated group/name - [DEFAULT]/matchmaker_ringfile +#ringfile=/etc/oslo/matchmaker_ring.json + + +[memcache] + +# +# Options defined in keystone +# + +# Memcache servers in the format of "host:port" (list value) +#servers=localhost:11211 + +# Number of compare-and-set attempts to make when using +# compare-and-set in the token memcache back end. (integer +# value) +#max_compare_and_set_retry=16 + + +[oauth1] + +# +# Options defined in keystone +# + +# Keystone Credential backend driver. (string value) +#driver=keystone.contrib.oauth1.backends.sql.OAuth1 + +# Duration (in seconds) for the OAuth Request Token. (integer +# value) +#request_token_duration=28800 + +# Duration (in seconds) for the OAuth Access Token. (integer +# value) +#access_token_duration=86400 + + +[os_inherit] + +# +# Options defined in keystone +# + +# role-assignment inheritance to projects from owning domain +# can be optionally enabled. (boolean value) +#enabled=false + + +[paste_deploy] + +# +# Options defined in keystone +# + +# Name of the paste configuration file that defines the +# available pipelines. (string value) +#config_file=/usr/share/keystone/keystone-dist-paste.ini + + +[policy] + +# +# Options defined in keystone +# + +# Keystone Policy backend driver. (string value) +#driver=keystone.policy.backends.sql.Policy + +# Maximum number of entities that will be returned in a policy +# collection. (integer value) +#list_limit= + + +[revoke] + +# +# Options defined in keystone +# + +# An implementation of the backend for persisting revocation +# events. (string value) +#driver=keystone.contrib.revoke.backends.kvs.Revoke + +# This value (calculated in seconds) is added to token +# expiration before a revocation event may be removed from the +# backend. (integer value) +#expiration_buffer=1800 + +# Toggle for revocation event cacheing. This has no effect +# unless global caching is enabled. (boolean value) +#caching=true + + +[signing] + +# +# Options defined in keystone +# + +# Deprecated in favor of provider in the [token] section. +# (string value) +#token_format= + +# Path of the certfile for token signing. (string value) +#certfile=/etc/keystone/ssl/certs/signing_cert.pem + +# Path of the keyfile for token signing. (string value) +#keyfile=/etc/keystone/ssl/private/signing_key.pem + +# Path of the CA for token signing. (string value) +#ca_certs=/etc/keystone/ssl/certs/ca.pem + +# Path of the CA Key for token signing. (string value) +#ca_key=/etc/keystone/ssl/private/cakey.pem + +# Key Size (in bits) for token signing cert (auto generated +# certificate). 
(integer value) +#key_size=2048 + +# Day the token signing cert is valid for (auto generated +# certificate). (integer value) +#valid_days=3650 + +# Certificate Subject (auto generated certificate) for token +# signing. (string value) +#cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com + + +[ssl] + +# +# Options defined in keystone +# + +# Toggle for SSL support on the keystone eventlet servers. +# (boolean value) +#enable=false + +# Path of the certfile for SSL. (string value) +#certfile=/etc/keystone/ssl/certs/keystone.pem + +# Path of the keyfile for SSL. (string value) +#keyfile=/etc/keystone/ssl/private/keystonekey.pem + +# Path of the ca cert file for SSL. (string value) +#ca_certs=/etc/keystone/ssl/certs/ca.pem + +# Path of the CA key file for SSL. (string value) +#ca_key=/etc/keystone/ssl/private/cakey.pem + +# Require client certificate. (boolean value) +#cert_required=false + +# SSL Key Length (in bits) (auto generated certificate). +# (integer value) +#key_size=1024 + +# Days the certificate is valid for once signed (auto +# generated certificate). (integer value) +#valid_days=3650 + +# SSL Certificate Subject (auto generated certificate). +# (string value) +#cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=localhost + + +[stats] + +# +# Options defined in keystone +# + +# Keystone stats backend driver. (string value) +#driver=keystone.contrib.stats.backends.kvs.Stats + + +[token] + +# +# Options defined in keystone +# + +# External auth mechanisms that should add bind information to +# token e.g. kerberos, x509. (list value) +#bind= + +# Enforcement policy on tokens presented to keystone with bind +# information. One of disabled, permissive, strict, required +# or a specifically required bind mode e.g. kerberos or x509 +# to require binding to that authentication. (string value) +#enforce_token_bind=permissive + +# Amount of time a token should remain valid (in seconds). +# (integer value) +#expiration=3600 +expiration=86400 + +# Controls the token construction, validation, and revocation +# operations. Core providers are +# "keystone.token.providers.[pki|uuid].Provider". (string +# value) +#provider= + +# Keystone Token persistence backend driver. (string value) +#driver=keystone.token.backends.sql.Token + +# Toggle for token system cacheing. This has no effect unless +# global caching is enabled. (boolean value) +#caching=true + +# Time to cache the revocation list and the revocation events +# if revoke extension is enabled (in seconds). This has no +# effect unless global and token caching are enabled. (integer +# value) +#revocation_cache_time=3600 + +# Time to cache tokens (in seconds). This has no effect unless +# global and token caching are enabled. (integer value) +#cache_time= + +# Revoke token by token identifier. Setting revoke_by_id to +# True enables various forms of enumerating tokens, e.g. `list +# tokens for user`. These enumerations are processed to +# determine the list of tokens to revoke. Only disable if +# you are switching to using the Revoke extension with a +# backend other than KVS, which stores events in memory. +# (boolean value) +#revoke_by_id=true + + +[trust] + +# +# Options defined in keystone +# + +# delegation and impersonation features can be optionally +# disabled. (boolean value) +#enabled=true + +# Keystone Trust backend driver. 
(string value) +#driver=keystone.trust.backends.sql.Trust + + diff --git a/tools/ansible-openstack/templates/etc/my.cnf b/tools/ansible-openstack/templates/etc/my.cnf new file mode 100644 index 0000000..2d6cf32 --- /dev/null +++ b/tools/ansible-openstack/templates/etc/my.cnf @@ -0,0 +1,46 @@ +[client] +port = 3306 +socket = /var/lib/mysql/mysql.sock +[mysqld_safe] +socket = /var/lib/mysql/mysql.sock +nice = 0 +[mysqld] +user = mysql +pid-file = /var/run/mysqld/mysqld.pid +socket = /var/lib/mysql/mysql.sock +port = 3306 +basedir = /usr +datadir = /var/lib/mysql +tmpdir = /tmp +skip-external-locking + + +bind-address = 0.0.0.0 + + +key_buffer = 16M +max_allowed_packet = 16M +thread_stack = 192K +thread_cache_size = 8 +myisam-recover = BACKUP +query_cache_limit = 1M +query_cache_size = 16M +log_error = /var/log/mysqld.log +expire_logs_days = 10 +max_binlog_size = 100M + +default-storage-engine = InnoDB +innodb_file_per_table +collation-server = utf8_general_ci +init-connect = 'SET NAMES utf8' +character-set-server = utf8 + +[mysqldump] +quick +quote-names +max_allowed_packet = 16M +[mysql] +[isamchk] +key_buffer = 16M + + diff --git a/tools/ansible-openstack/templates/etc/neutron/dhcp_agent.ini b/tools/ansible-openstack/templates/etc/neutron/dhcp_agent.ini new file mode 100644 index 0000000..91e47b7 --- /dev/null +++ b/tools/ansible-openstack/templates/etc/neutron/dhcp_agent.ini @@ -0,0 +1,90 @@ +[DEFAULT] +# Show debugging output in log (sets DEBUG log level output) +debug = False + +# The DHCP agent will resync its state with Neutron to recover from any +# transient notification or rpc errors. The interval is number of +# seconds between attempts. +resync_interval = 30 + +# The DHCP agent requires an interface driver be set. Choose the one that best +# matches your plugin. +interface_driver =neutron.agent.linux.interface.BridgeInterfaceDriver + +# Example of interface_driver option for OVS based plugins(OVS, Ryu, NEC, NVP, +# BigSwitch/Floodlight) +# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver + +# Name of Open vSwitch bridge to use +# ovs_integration_bridge = br-int + +# Use veth for an OVS interface or not. +# Support kernels with limited namespace support +# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. +# ovs_use_veth = False + +# Example of interface_driver option for LinuxBridge +# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver + +# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires +# no additional setup of the DHCP server. +dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq + +# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and +# iproute2 package that supports namespaces). +# use_namespaces = True + +# The DHCP server can assist with providing metadata support on isolated +# networks. Setting this value to True will cause the DHCP server to append +# specific host routes to the DHCP request. The metadata service will only +# be activated when the subnet does not contain any router port. The guest +# instance must be configured to request host routes via DHCP (Option 121). +# enable_isolated_metadata = False + +# Allows for serving metadata requests coming from a dedicated metadata +# access network whose cidr is 169.254.169.254/16 (or larger prefix), and +# is connected to a Neutron router from which the VMs send metadata +# request. In this case DHCP Option 121 will not be injected in VMs, as +# they will be able to reach 169.254.169.254 through a router. 
+# This option requires enable_isolated_metadata = True +# enable_metadata_network = False + +# Number of threads to use during sync process. Should not exceed connection +# pool size configured on server. +# num_sync_threads = 4 + +# Location to store DHCP server config files +# dhcp_confs = $state_path/dhcp + +# Domain to use for building the hostnames +# dhcp_domain = openstacklocal + +# Override the default dnsmasq settings with this file +# dnsmasq_config_file = + +# Comma-separated list of DNS servers which will be used by dnsmasq +# as forwarders. +# dnsmasq_dns_servers = + +# Limit number of leases to prevent a denial-of-service. +# dnsmasq_lease_max = 16777216 + +# Location to DHCP lease relay UNIX domain socket +# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay + +# Location of Metadata Proxy UNIX domain socket +# metadata_proxy_socket = $state_path/metadata_proxy + +# dhcp_delete_namespaces, which is false by default, can be set to True if +# namespaces can be deleted cleanly on the host running the dhcp agent. +# Do not enable this until you understand the problem with the Linux iproute +# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and +# you are sure that your version of iproute does not suffer from the problem. +# If True, namespaces will be deleted when a dhcp server is disabled. +# dhcp_delete_namespaces = False + +# Timeout for ovs-vsctl commands. +# If the timeout expires, ovs commands will fail with ALARMCLOCK error. +# ovs_vsctl_timeout = 10 +root_helper=sudo neutron-rootwrap /etc/neutron/rootwrap.conf +state_path=/var/lib/neutron diff --git a/tools/ansible-openstack/templates/etc/neutron/l3_agent.ini b/tools/ansible-openstack/templates/etc/neutron/l3_agent.ini new file mode 100644 index 0000000..18d2f58 --- /dev/null +++ b/tools/ansible-openstack/templates/etc/neutron/l3_agent.ini @@ -0,0 +1,80 @@ +[DEFAULT] +# Show debugging output in log (sets DEBUG log level output) +debug = False + +# L3 requires that an interface driver be set. Choose the one that best +# matches your plugin. +interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver + +# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC) +# that supports L3 agent +# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver + +# Use veth for an OVS interface or not. +# Support kernels with limited namespace support +# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. +# ovs_use_veth = False + +# Example of interface_driver option for LinuxBridge +# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver + +# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and +# iproute2 package that supports namespaces). +use_namespaces = True + +# If use_namespaces is set as False then the agent can only configure one router. + +# This is done by setting the specific router_id. +# router_id = + +# When external_network_bridge is set, each L3 agent can be associated +# with no more than one external network. This value should be set to the UUID +# of that external network. To allow L3 agent support multiple external +# networks, both the external_network_bridge and gateway_external_network_id +# must be left empty. +# gateway_external_network_id = + +# Indicates that this L3 agent should also handle routers that do not have +# an external network gateway configured. 
This option should be True only +# for a single agent in a Neutron deployment, and may be False for all agents +# if all routers must have an external network gateway +handle_internal_only_routers = True + +# Name of bridge used for external network traffic. This should be set to +# empty value for the linux bridge. when this parameter is set, each L3 agent +# can be associated with no more than one external network. +external_network_bridge = + +# TCP Port used by Neutron metadata server +# metadata_port = 9697 + +# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0 +# to disable this feature. +# send_arp_for_ha = 0 + +# seconds between re-sync routers' data if needed +# periodic_interval = 40 + +# seconds to start to sync routers' data after +# starting agent +# periodic_fuzzy_delay = 5 + +# enable_metadata_proxy, which is true by default, can be set to False +# if the Nova metadata server is not available +# enable_metadata_proxy = True + +# Location of Metadata Proxy UNIX domain socket +# metadata_proxy_socket = $state_path/metadata_proxy + +# router_delete_namespaces, which is false by default, can be set to True if +# namespaces can be deleted cleanly on the host running the L3 agent. +# Do not enable this until you understand the problem with the Linux iproute +# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and +# you are sure that your version of iproute does not suffer from the problem. +# If True, namespaces will be deleted when a router is destroyed. +# router_delete_namespaces = False +gateway_external_network_id = + +# Timeout for ovs-vsctl commands. +# If the timeout expires, ovs commands will fail with ALARMCLOCK error. +# ovs_vsctl_timeout = 10 diff --git a/tools/ansible-openstack/templates/etc/neutron/lbaas_agent.ini b/tools/ansible-openstack/templates/etc/neutron/lbaas_agent.ini new file mode 100644 index 0000000..ebb7abf --- /dev/null +++ b/tools/ansible-openstack/templates/etc/neutron/lbaas_agent.ini @@ -0,0 +1,41 @@ +[DEFAULT] +# Show debugging output in log (sets DEBUG log level output). +debug = False +verbose = True + +# The LBaaS agent will resync its state with Neutron to recover from any +# transient notification or rpc errors. The interval is number of +# seconds between attempts. +# periodic_interval = 10 + +# LBaas requires an interface driver be set. Choose the one that best +# matches your plugin. +interface_driver =neutron.agent.linux.interface.BridgeInterfaceDriver + +# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC, NVP, +# BigSwitch/Floodlight) +# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver + +# Use veth for an OVS interface or not. +# Support kernels with limited namespace support +# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. +# ovs_use_veth = False + +# Example of interface_driver option for LinuxBridge +# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver + +# The agent requires drivers to manage the loadbalancer. HAProxy is the opensource version. 
+# Multiple device drivers reflecting different service providers could be specified: +# device_driver = path.to.provider1.driver.Driver +# device_driver = path.to.provider2.driver.Driver +# Default is: +device_driver = neutron.services.loadbalancer.drivers.haproxy.namespace_driver.HaproxyNSDriver +use_namespaces=True + +[haproxy] +# Location to store config and state files +# loadbalancer_state_path = $state_path/lbaas + +# The user group +# user_group = nogroup +user_group = haproxy diff --git a/tools/ansible-openstack/templates/etc/neutron/metadata_agent.ini b/tools/ansible-openstack/templates/etc/neutron/metadata_agent.ini new file mode 100644 index 0000000..3c7a536 --- /dev/null +++ b/tools/ansible-openstack/templates/etc/neutron/metadata_agent.ini @@ -0,0 +1,38 @@ +[DEFAULT] +# Show debugging output in log (sets DEBUG log level output) +debug = False + +# The Neutron user information for accessing the Neutron API. +auth_url = {{ keystone_internal_url }} +auth_region = {{ openstack_region }} +# Turn off verification of the certificate for ssl +# auth_insecure = False +# Certificate Authority public key (CA cert) file for ssl +# auth_ca_cert = +admin_tenant_name = {{ service_tenant }} +admin_user = neutron +admin_password = {{ neutron_identity_password }} + +# Network service endpoint type to pull from the keystone catalog +# endpoint_type = adminURL + +# IP address used by Nova metadata server +# nova_metadata_ip = {{ nova_metadata_host }} + +# TCP Port used by Nova metadata server +# nova_metadata_port = 8775 + +# When proxying metadata requests, Neutron signs the Instance-ID header with a +# shared secret to prevent spoofing. You may select any string for a secret, +# but it must match here and in the configuration used by the Nova Metadata +# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret +# metadata_proxy_shared_secret = + +# Location of Metadata Proxy UNIX domain socket +# metadata_proxy_socket = $state_path/metadata_proxy + +# Number of separate worker processes for metadata server +# metadata_workers = 0 + +# Number of backlog requests to configure the metadata server socket with +# metadata_backlog = 128 diff --git a/tools/ansible-openstack/templates/etc/neutron/neutron.conf b/tools/ansible-openstack/templates/etc/neutron/neutron.conf new file mode 100644 index 0000000..27c88f2 --- /dev/null +++ b/tools/ansible-openstack/templates/etc/neutron/neutron.conf @@ -0,0 +1,472 @@ +[DEFAULT] +# Print more verbose output (set logging level to INFO instead of default WARNING level). +verbose = {{ log_verbose }} + +# Print debugging output (set logging level to DEBUG instead of default WARNING level). +debug = {{ log_debug }} + +# Where to store Neutron state files. This directory must be writable by the +# user executing the agent. +# state_path = /var/lib/neutron + +# Where to store lock files +# lock_path = $state_path/lock + +# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s +# log_date_format = %Y-%m-%d %H:%M:%S + +# use_syslog -> syslog +# log_file and log_dir -> log_dir/log_file +# (not log_file) and log_dir -> log_dir/{binary_name}.log +# use_stderr -> stderr +# (not user_stderr) and (not log_file) -> stdout +# publish_errors -> notification system + +# use_syslog = False +# syslog_log_facility = LOG_USER + +# use_stderr = False +# log_file = +# log_dir = + +# publish_errors = False + +# Address to bind the API server to +# bind_host = 0.0.0.0 + +# Port the bind the API server to +# bind_port = 9696 + +# Path to the extensions. 
Note that this can be a colon-separated list of +# paths. For example: +# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions +# The __path__ of neutron.extensions is appended to this, so if your +# extensions are in there you don't need to specify them here +# api_extensions_path = + +# (StrOpt) Neutron core plugin entrypoint to be loaded from the +# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the +# plugins included in the neutron source distribution. For compatibility with +# previous versions, the class name of a plugin can be specified instead of its +# entrypoint name. +# +core_plugin = ml2 +# Example: core_plugin = ml2 + + +# (ListOpt) List of service plugin entrypoints to be loaded from the +# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of +# the plugins included in the neutron source distribution. For compatibility +# with previous versions, the class name of a plugin can be specified instead +# of its entrypoint name. +# +service_plugins =router,firewall,lbaas,metering +# Example: service_plugins = router,firewall,lbaas,vpnaas,metering + +# Paste configuration file +# api_paste_config = /usr/share/neutron/api-paste.ini + +# The strategy to be used for auth. +# Supported values are 'keystone'(default), 'noauth'. +auth_strategy = keystone + +# Base MAC address. The first 3 octets will remain unchanged. If the +# 4h octet is not 00, it will also be used. The others will be +# randomly generated. +# 3 octet +# base_mac = fa:16:3e:00:00:00 +# 4 octet +# base_mac = fa:16:3e:4f:00:00 + +# Maximum amount of retries to generate a unique MAC address +# mac_generation_retries = 16 + +# DHCP Lease duration (in seconds) +# dhcp_lease_duration = 86400 + +# Allow sending resource operation notification to DHCP agent +# dhcp_agent_notification = True + +# Enable or disable bulk create/update/delete operations +# allow_bulk = True +# Enable or disable pagination +# allow_pagination = False +# Enable or disable sorting +# allow_sorting = False +# Enable or disable overlapping IPs for subnets +# Attention: the following parameter MUST be set to False if Neutron is +# being used in conjunction with nova security groups +# allow_overlapping_ips = True +# Ensure that configured gateway is on subnet +# force_gateway_on_subnet = False + + +# RPC configuration options. Defined in rpc __init__ +# The messaging module to use, defaults to kombu. +rpc_backend = neutron.openstack.common.rpc.impl_kombu +# Size of RPC thread pool +# rpc_thread_pool_size = 64 +# Size of RPC connection pool +# rpc_conn_pool_size = 30 +# Seconds to wait for a response from call or multicall +# rpc_response_timeout = 60 +# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq. +# rpc_cast_timeout = 30 +# Modules of exceptions that are permitted to be recreated +# upon receiving exception data from an rpc call. 
+# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception +# AMQP exchange to connect to if using RabbitMQ or QPID +# control_exchange = neutron + +# If passed, use a fake RabbitMQ provider +# fake_rabbit = False + +# Configuration options if sending notifications via kombu rpc (these are +# the defaults) +# SSL version to use (valid only if SSL enabled) +# kombu_ssl_version = +# SSL key file (valid only if SSL enabled) +# kombu_ssl_keyfile = +# SSL cert file (valid only if SSL enabled) +# kombu_ssl_certfile = +# SSL certification authority file (valid only if SSL enabled) +# kombu_ssl_ca_certs = +# IP address of the RabbitMQ installation +rabbit_hosts={{ amqp_host }}:5672 +# Password of the RabbitMQ server +rabbit_password={{ amqp_pass }} +# Port where RabbitMQ server is running/listening +# rabbit_port = 5672 +# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) +# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' +# rabbit_hosts = localhost:5672 +# User ID used for RabbitMQ connections +rabbit_userid={{ amqp_user }} +# Location of a virtual RabbitMQ installation. +# rabbit_virtual_host = / +# Maximum retries with trying to connect to RabbitMQ +# (the default of 0 implies an infinite retry count) +# rabbit_max_retries = 0 +# RabbitMQ connection retry interval +# rabbit_retry_interval = 1 +# Use HA queues in RabbitMQ (x-ha-policy: all). You need to +# wipe RabbitMQ database when changing this option. (boolean value) +# rabbit_ha_queues = false + +# QPID +# rpc_backend=neutron.openstack.common.rpc.impl_qpid +# Qpid broker hostname +# qpid_hostname = localhost +# Qpid broker port +# qpid_port = 5672 +# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) +# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' +# qpid_hosts = localhost:5672 +# Username for qpid connection +# qpid_username = '' +# Password for qpid connection +# qpid_password = '' +# Space separated list of SASL mechanisms to use for auth +# qpid_sasl_mechanisms = '' +# Seconds between connection keepalive heartbeats +# qpid_heartbeat = 60 +# Transport to use, either 'tcp' or 'ssl' +# qpid_protocol = tcp +# Disable Nagle algorithm +# qpid_tcp_nodelay = True + +# ZMQ +# rpc_backend=neutron.openstack.common.rpc.impl_zmq +# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. +# The "host" option should point or resolve to this address. +# rpc_zmq_bind_address = * + +# ============ Notification System Options ===================== + +# Notifications can be sent when network/subnet/port are created, updated or deleted. +# There are three methods of sending notifications: logging (via the +# log_file directive), rpc (via a message queue) and +# noop (no notifications sent, the default) + +# Notification_driver can be defined multiple times +# Do nothing driver +# notification_driver = neutron.openstack.common.notifier.no_op_notifier +# Logging driver +# notification_driver = neutron.openstack.common.notifier.log_notifier +# RPC driver. +# notification_driver = neutron.openstack.common.notifier.rpc_notifier + +# default_notification_level is used to form actual topic name(s) or to set logging level +# default_notification_level = INFO + +# default_publisher_id is a part of the notification payload +# host = myhost.com +# default_publisher_id = $host + +# Defined in rpc_notifier, can be comma separated values. 
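+# Illustrative rendering only; the inventory values below are hypothetical,
+# not upstream defaults. With amqp_host=192.0.2.20, amqp_pass=s3cr3t and
+# amqp_user=openstack, the templated RabbitMQ lines earlier in this file
+# would render as:
+# rabbit_hosts=192.0.2.20:5672
+# rabbit_password=s3cr3t
+# rabbit_userid=openstack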
+# The actual topic names will be %s.%(default_notification_level)s +# notification_topics = notifications + +# Default maximum number of items returned in a single response, +# value == infinite and value < 0 means no max limit, and value must +# be greater than 0. If the number of items requested is greater than +# pagination_max_limit, server will just return pagination_max_limit +# of number of items. +# pagination_max_limit = -1 + +# Maximum number of DNS nameservers per subnet +# max_dns_nameservers = 5 + +# Maximum number of host routes per subnet +# max_subnet_host_routes = 20 + +# Maximum number of fixed ips per port +# max_fixed_ips_per_port = 5 + +# =========== items for agent management extension ============= +# Seconds to regard the agent as down; should be at least twice +# report_interval, to be sure the agent is down for good +# agent_down_time = 75 +# =========== end of items for agent management extension ===== + +# =========== items for agent scheduler extension ============= +# Driver to use for scheduling network to DHCP agent +# network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler +# Driver to use for scheduling router to a default L3 agent +# router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler +# Driver to use for scheduling a loadbalancer pool to an lbaas agent +# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler + +# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted +# networks to first DHCP agent which sends get_active_networks message to +# neutron server +# network_auto_schedule = True + +# Allow auto scheduling routers to L3 agent. It will schedule non-hosted +# routers to first L3 agent which sends sync_routers message to neutron server +# router_auto_schedule = True + +# Number of DHCP agents scheduled to host a network. This enables redundant +# DHCP agents for configured networks. +# dhcp_agents_per_network = 1 + +# =========== end of items for agent scheduler extension ===== + +# =========== WSGI parameters related to the API server ============== +# Number of separate worker processes to spawn. The default, 0, runs the +# worker thread in the current process. Greater than 0 launches that number of +# child processes as workers. The parent process manages them. +# api_workers = 0 + +# Number of separate RPC worker processes to spawn. The default, 0, runs the +# worker thread in the current process. Greater than 0 launches that number of +# child processes as RPC workers. The parent process manages them. +# This feature is experimental until issues are addressed and testing has been +# enabled for various plugins for compatibility. +# rpc_workers = 0 + +# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when +# starting API server. Not supported on OS X. +# tcp_keepidle = 600 + +# Number of seconds to keep retrying to listen +# retry_until_window = 30 + +# Number of backlog requests to configure the socket with. +# backlog = 4096 + +# Max header line to accommodate large tokens +# max_header_line = 16384 + +# Enable SSL on the API server +# use_ssl = False + +# Certificate file to use when starting API server securely +# ssl_cert_file = /path/to/certfile + +# Private key file to use when starting API server securely +# ssl_key_file = /path/to/keyfile + +# CA certificate file to use when starting API server securely to +# verify connecting clients. 
This is an optional parameter only required if +# API clients need to authenticate to the API server using SSL certificates +# signed by a trusted CA +# ssl_ca_file = /path/to/cafile +# ======== end of WSGI parameters related to the API server ========== + + +# ======== neutron nova interactions ========== +# Send notification to nova when port status is active. +notify_nova_on_port_status_changes = True + +# Send notifications to nova when port data (fixed_ips/floatingips) change +# so nova can update it's cache. +notify_nova_on_port_data_changes = True + +# URL for connection to nova (Only supports one nova region currently). +nova_url = http://{{ frontend_int_ip }}:8774/v2 + +# Name of nova region to use. Useful if keystone manages more than one region +nova_region_name = {{ openstack_region }} + +# Username for connection to nova in admin context +nova_admin_username = nova + +# The uuid of the admin nova tenant +nova_admin_tenant_id = {{ hostvars[groups['frontend'][0]]['admin_tenantID'] }} + + +# Password for connection to nova in admin context. +nova_admin_password = {{ nova_identity_password }} + +# Authorization URL for connection to nova in admin context. +nova_admin_auth_url = http://{{ frontend_int_ip }}:35357/v2.0 + + +# Number of seconds between sending events to nova if there are any events to send +# send_events_interval = 2 + +# ======== end of neutron nova interactions ========== + +[quotas] +# Default driver to use for quota checks +# quota_driver = neutron.db.quota_db.DbQuotaDriver + +# Resource name(s) that are supported in quota features +# quota_items = network,subnet,port + +# Default number of resource allowed per tenant. A negative value means +# unlimited. +# default_quota = -1 + +# Number of networks allowed per tenant. A negative value means unlimited. +# quota_network = 10 + +# Number of subnets allowed per tenant. A negative value means unlimited. +# quota_subnet = 10 + +# Number of ports allowed per tenant. A negative value means unlimited. +# quota_port = 50 + +# Number of security groups allowed per tenant. A negative value means +# unlimited. +# quota_security_group = 10 + +# Number of security group rules allowed per tenant. A negative value means +# unlimited. +# quota_security_group_rule = 100 + +# Number of vips allowed per tenant. A negative value means unlimited. +# quota_vip = 10 + +# Number of pools allowed per tenant. A negative value means unlimited. +# quota_pool = 10 + +# Number of pool members allowed per tenant. A negative value means unlimited. +# The default is unlimited because a member is not a real resource consumer +# on Openstack. However, on back-end, a member is a resource consumer +# and that is the reason why quota is possible. +# quota_member = -1 + +# Number of health monitors allowed per tenant. A negative value means +# unlimited. +# The default is unlimited because a health monitor is not a real resource +# consumer on Openstack. However, on back-end, a member is a resource consumer +# and that is the reason why quota is possible. +# quota_health_monitors = -1 + +# Number of routers allowed per tenant. A negative value means unlimited. +# quota_router = 10 + +# Number of floating IPs allowed per tenant. A negative value means unlimited. +# quota_floatingip = 50 + +[agent] +# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real +# root filter facility. 
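+# Illustrative rendering only (hypothetical values): nova_admin_tenant_id in
+# the nova-interaction block above is looked up from the admin_tenantID fact
+# of the first host in the Ansible "frontend" group. Assuming
+# frontend_int_ip=192.0.2.30 and an admin tenant ID of
+# f0e2c5a8a1b94d53b6a7c0a4d2f1e8d9, the templated nova_* lines would render as:
+# nova_url = http://192.0.2.30:8774/v2
+# nova_admin_tenant_id = f0e2c5a8a1b94d53b6a7c0a4d2f1e8d9
+# nova_admin_auth_url = http://192.0.2.30:35357/v2.0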
+# Change to "sudo" to skip the filtering and just run the comand directly +# root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf + +# =========== items for agent management extension ============= +# seconds between nodes reporting state to server; should be less than +# agent_down_time, best if it is half or less than agent_down_time +# report_interval = 30 + +# =========== end of items for agent management extension ===== + +[keystone_authtoken] +auth_host={{ frontend_int_ip }} +auth_port=35357 +auth_protocol=http +admin_tenant_name={{ service_tenant }} +admin_user=neutron +admin_password={{ neutron_identity_password }} +identity_uri=http://{{ frontend_int_ip }}:35357 +auth_uri=http://{{ frontend_ext_ip }}:5000 + +[database] +# This line MUST be changed to actually run the plugin. +# Example: +# connection = mysql://root:pass@127.0.0.1:3306/neutron +# Replace 127.0.0.1 above with the IP address of the database used by the +# main neutron server. (Leave it as is if the database runs on this host.) +# connection = sqlite:// +connection=mysql://neutron:{{ neutron_db_password }}@{{ sql_host }}/neutron + + +# The SQLAlchemy connection string used to connect to the slave database +# slave_connection = + +# Database reconnection retry times - in event connectivity is lost +# set to -1 implies an infinite retry count +# max_retries = 10 + +# Database reconnection interval in seconds - if the initial connection to the +# database fails +# retry_interval = 10 + +# Minimum number of SQL connections to keep open in a pool +# min_pool_size = 1 + +# Maximum number of SQL connections to keep open in a pool +# max_pool_size = 10 + +# Timeout in seconds before idle sql connections are reaped +# idle_timeout = 3600 +idle_timeout = 200 + +# If set, use this value for max_overflow with sqlalchemy +# max_overflow = 20 + +# Verbosity of SQL debugging information. 0=None, 100=Everything +# connection_debug = 0 + +# Add python stack traces to SQL as comment strings +# connection_trace = False + +# If set, use this value for pool_timeout with sqlalchemy +# pool_timeout = 10 + +[service_providers] +# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall. +# Must be in form: +# service_provider=::[:default] +# List of allowed service types includes LOADBALANCER, FIREWALL, VPN +# Combination of and must be unique; must also be unique +# This is multiline option, example for default provider: +# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default +# example of non-default provider: +# service_provider=FIREWALL:name2:firewall_driver_path +# --- Reference implementations --- +# service_provider = LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default +#service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default +# In order to activate Radware's lbaas driver you need to uncomment the next line. +# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below. +# Otherwise comment the HA Proxy line +# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default +# uncomment the following line to make the 'netscaler' LBaaS provider available. 
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver +# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver. +# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default +# Uncomment the line below to use Embrane heleos as Load Balancer service provider. +# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default diff --git a/tools/ansible-openstack/templates/etc/neutron/ovs_neutron_plugin.ini b/tools/ansible-openstack/templates/etc/neutron/ovs_neutron_plugin.ini new file mode 100644 index 0000000..b45aff9 --- /dev/null +++ b/tools/ansible-openstack/templates/etc/neutron/ovs_neutron_plugin.ini @@ -0,0 +1,172 @@ +[ovs] +# (StrOpt) Type of network to allocate for tenant networks. The +# default value 'local' is useful only for single-box testing and +# provides no connectivity between hosts. You MUST either change this +# to 'vlan' and configure network_vlan_ranges below or change this to +# 'gre' or 'vxlan' and configure tunnel_id_ranges below in order for +# tenant networks to provide connectivity between hosts. Set to 'none' +# to disable creation of tenant networks. +# +# tenant_network_type = local +# Example: tenant_network_type = gre +# Example: tenant_network_type = vxlan + +# (ListOpt) Comma-separated list of +# [::] tuples enumerating ranges +# of VLAN IDs on named physical networks that are available for +# allocation. All physical networks listed are available for flat and +# VLAN provider network creation. Specified ranges of VLAN IDs are +# available for tenant network allocation if tenant_network_type is +# 'vlan'. If empty, only gre, vxlan and local networks may be created. +# +# network_vlan_ranges = +# Example: network_vlan_ranges = physnet1:1000:2999 + +# (BoolOpt) Set to True in the server and the agents to enable support +# for GRE or VXLAN networks. Requires kernel support for OVS patch ports and +# GRE or VXLAN tunneling. +# +# WARNING: This option will be deprecated in the Icehouse release, at which +# point setting tunnel_type below will be required to enable +# tunneling. +# +# enable_tunneling = False + +# (StrOpt) The type of tunnel network, if any, supported by the plugin. If +# this is set, it will cause tunneling to be enabled. If this is not set and +# the option enable_tunneling is set, this will default to 'gre'. +# +# tunnel_type = +# Example: tunnel_type = gre +# Example: tunnel_type = vxlan + +# (ListOpt) Comma-separated list of : tuples +# enumerating ranges of GRE or VXLAN tunnel IDs that are available for +# tenant network allocation if tenant_network_type is 'gre' or 'vxlan'. +# +# tunnel_id_ranges = +# Example: tunnel_id_ranges = 1:1000 + +# Do not change this parameter unless you have a good reason to. +# This is the name of the OVS integration bridge. There is one per hypervisor. +# The integration bridge acts as a virtual "patch bay". All VM VIFs are +# attached to this bridge and then "patched" according to their network +# connectivity. +# +# integration_bridge = br-int + +# Only used for the agent if tunnel_id_ranges (above) is not empty for +# the server. In most cases, the default value should be fine. 
+# +# tunnel_bridge = br-tun + +# Peer patch port in integration bridge for tunnel bridge +# int_peer_patch_port = patch-tun + +# Peer patch port in tunnel bridge for integration bridge +# tun_peer_patch_port = patch-int + +# Uncomment this line for the agent if tunnel_id_ranges (above) is not +# empty for the server. Set local-ip to be the local IP address of +# this hypervisor. +# +# local_ip = + +# (ListOpt) Comma-separated list of : tuples +# mapping physical network names to the agent's node-specific OVS +# bridge names to be used for flat and VLAN networks. The length of +# bridge names should be no more than 11. Each bridge must +# exist, and should have a physical network interface configured as a +# port. All physical networks listed in network_vlan_ranges on the +# server should have mappings to appropriate bridges on each agent. +# +# bridge_mappings = +# Example: bridge_mappings = physnet1:br-eth1 + +[agent] +# Agent's polling interval in seconds +# polling_interval = 2 + +# (ListOpt) The types of tenant network tunnels supported by the agent. +# Setting this will enable tunneling support in the agent. This can be set to +# either 'gre' or 'vxlan'. If this is unset, it will default to [] and +# disable tunneling support in the agent. When running the agent with the OVS +# plugin, this value must be the same as "tunnel_type" in the "[ovs]" section. +# When running the agent with ML2, you can specify as many values here as +# your compute hosts supports. +# +# tunnel_types = +# Example: tunnel_types = gre +# Example: tunnel_types = vxlan +# Example: tunnel_types = vxlan, gre + +# (IntOpt) The port number to utilize if tunnel_types includes 'vxlan'. By +# default, this will make use of the Open vSwitch default value of '4789' if +# not specified. +# +# vxlan_udp_port = +# Example: vxlan_udp_port = 8472 + +# (IntOpt) This is the MTU size of veth interfaces. +# Do not change unless you have a good reason to. +# The default MTU size of veth interfaces is 1500. +# veth_mtu = +# Example: veth_mtu = 1504 + +# (BoolOpt) Flag to enable l2-population extension. This option should only be +# used in conjunction with ml2 plugin and l2population mechanism driver. It'll +# enable plugin to populate remote ports macs and IPs (using fdb_add/remove +# RPC calbbacks instead of tunnel_sync/update) on OVS agents in order to +# optimize tunnel management. +# +# l2_population = False + +[securitygroup] +# Firewall driver for realizing neutron security group function. +# firewall_driver = neutron.agent.firewall.NoopFirewallDriver +# Example: firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver + +#----------------------------------------------------------------------------- +# Sample Configurations. +#----------------------------------------------------------------------------- +# +# 1. With VLANs on eth1. +# [database] +# connection = mysql://root:nova@127.0.0.1:3306/ovs_neutron +# [OVS] +# network_vlan_ranges = default:2000:3999 +# tunnel_id_ranges = +# integration_bridge = br-int +# bridge_mappings = default:br-eth1 +# [AGENT] +# Add the following setting, if you want to log to a file +# +# 2. With tunneling. 
+# [database] +# connection = mysql://root:nova@127.0.0.1:3306/ovs_neutron +# [OVS] +# network_vlan_ranges = +# tunnel_id_ranges = 1:1000 +# integration_bridge = br-int +# tunnel_bridge = br-tun +# local_ip = 10.0.0.3 + +[OVS] +tunnel_id_ranges=1:1000 +tenant_network_type=gre +local_ip={{ my_int_ip }} +enable_tunneling=True +integration_bridge=br-int +tunnel_bridge=br-tun + +[DATABASE] +sql_connection = mysql://neutron:{{ neutron_db_password }}@{{ sql_ip }}:3306/neutron +sql_max_retries = 10 +reconnect_interval = 2 +sql_idle_timeout = 3600 + +[AGENT] +polling_interval=2 + +[SECURITYGROUP] +firewall_driver=neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver diff --git a/tools/ansible-openstack/templates/etc/neutron/plugins/ml2/ml2_conf_linuxbridge_VLAN.ini b/tools/ansible-openstack/templates/etc/neutron/plugins/ml2/ml2_conf_linuxbridge_VLAN.ini new file mode 100644 index 0000000..5f354a1 --- /dev/null +++ b/tools/ansible-openstack/templates/etc/neutron/plugins/ml2/ml2_conf_linuxbridge_VLAN.ini @@ -0,0 +1,76 @@ +[ml2] +# (ListOpt) List of network type driver entrypoints to be loaded from +# the neutron.ml2.type_drivers namespace. +# +type_drivers = flat,vlan,gre,vxlan +tenant_network_types = vlan +mechanism_drivers = linuxbridge + + +# Example: type_drivers = flat,vlan,gre,vxlan + +# (ListOpt) Ordered list of network_types to allocate as tenant +# networks. The default value 'local' is useful for single-box testing +# but provides no connectivity between hosts. +# +# tenant_network_types = local +# Example: tenant_network_types = vlan,gre,vxlan + +# (ListOpt) Ordered list of networking mechanism driver entrypoints +# to be loaded from the neutron.ml2.mechanism_drivers namespace. +# mechanism_drivers = +# Example: mechanism drivers = openvswitch,mlnx +# Example: mechanism_drivers = arista +# Example: mechanism_drivers = cisco,logger +# Example: mechanism_drivers = openvswitch,brocade +# Example: mechanism_drivers = linuxbridge,brocade + + +[ml2_type_flat] +# (ListOpt) List of physical_network names with which flat networks +# can be created. Use * to allow flat networks with arbitrary +# physical_network names. +# +# flat_networks = +# Example:flat_networks = physnet1,physnet2 +# Example:flat_networks = * +flat_networks = phys_external + + +[ml2_type_vlan] +# (ListOpt) List of [::] tuples +# specifying physical_network names usable for VLAN provider and +# tenant networks, as well as ranges of VLAN tags on each +# physical_network available for allocation as tenant networks. +# +# network_vlan_ranges = +# Example: network_vlan_ranges = physnet1:1000:2999,physnet2 +network_vlan_ranges = phys_internal:{{ VLAN_RANGE }},phys_external + + +[ml2_type_gre] +# (ListOpt) Comma-separated list of : tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation +# tunnel_id_ranges = + +[ml2_type_vxlan] +# (ListOpt) Comma-separated list of : tuples enumerating +# ranges of VXLAN VNI IDs that are available for tenant network allocation. +# +# vni_ranges = + +# (StrOpt) Multicast group for the VXLAN interface. When configured, will +# enable sending all broadcast traffic to this multicast group. When left +# unconfigured, will disable multicast VXLAN mode. +# +# vxlan_group = +# Example: vxlan_group = 239.1.1.1 + +[securitygroup] +# Controls if neutron security group is enabled or not. +# It should be false when you use nova security group. 
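+# When security groups are handled by neutron, as enabled just below, the
+# compute nodes are usually pointed at neutron as well so that rules are not
+# applied twice. A minimal sketch of the matching nova.conf settings (verify
+# the option names against the nova release in use):
+#   security_group_api = neutron
+#   firewall_driver = nova.virt.firewall.NoopFirewallDriver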
+enable_security_group = True +firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver + +[LINUX_BRIDGE] +physical_interface_mappings = phys_internal:{{ my_int_if }},phys_external:{{ my_ext_if }} + diff --git a/tools/ansible-openstack/templates/etc/nova/nova.conf b/tools/ansible-openstack/templates/etc/nova/nova.conf new file mode 100644 index 0000000..ce0d625 --- /dev/null +++ b/tools/ansible-openstack/templates/etc/nova/nova.conf @@ -0,0 +1,3706 @@ +[DEFAULT] + +# +# Options defined in oslo.messaging +# + +# Use durable queues in amqp. (boolean value) +# Deprecated group;name - DEFAULT;rabbit_durable_queues +#amqp_durable_queues=false + +# Auto-delete queues in amqp. (boolean value) +#amqp_auto_delete=false + +# Size of RPC connection pool. (integer value) +#rpc_conn_pool_size=30 + +# Modules of exceptions that are permitted to be recreated +# upon receiving exception data from an rpc call. (list value) +#allowed_rpc_exception_modules=oslo.messaging.exceptions,nova.exception,cinder.exception,exceptions + +# Qpid broker hostname. (string value) +#qpid_hostname=localhost + +# Qpid broker port. (integer value) +#qpid_port=5672 + +# Qpid HA cluster host:port pairs. (list value) +#qpid_hosts=$qpid_hostname:$qpid_port + +# Username for Qpid connection. (string value) +#qpid_username= + +# Password for Qpid connection. (string value) +#qpid_password= + +# Space separated list of SASL mechanisms to use for auth. +# (string value) +#qpid_sasl_mechanisms= + +# Seconds between connection keepalive heartbeats. (integer +# value) +#qpid_heartbeat=60 + +# Transport to use, either 'tcp' or 'ssl'. (string value) +#qpid_protocol=tcp + +# Whether to disable the Nagle algorithm. (boolean value) +#qpid_tcp_nodelay=true + +# The qpid topology version to use. Version 1 is what was +# originally used by impl_qpid. Version 2 includes some +# backwards-incompatible changes that allow broker federation +# to work. Users should update to version 2 when they are +# able to take everything down, as it requires a clean break. +# (integer value) +#qpid_topology_version=1 + +# SSL version to use (valid only if SSL enabled). valid values +# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some +# distributions. (string value) +#kombu_ssl_version= + +# SSL key file (valid only if SSL enabled). (string value) +#kombu_ssl_keyfile= + +# SSL cert file (valid only if SSL enabled). (string value) +#kombu_ssl_certfile= + +# SSL certification authority file (valid only if SSL +# enabled). (string value) +#kombu_ssl_ca_certs= + +# How long to wait before reconnecting in response to an AMQP +# consumer cancel notification. (floating point value) +#kombu_reconnect_delay=1.0 + +# The RabbitMQ broker address where a single node is used. +# (string value) +#rabbit_host=localhost + +# The RabbitMQ broker port where a single node is used. +# (integer value) +rabbit_port=5672 + +# RabbitMQ HA cluster host:port pairs. (list value) +rabbit_hosts={{ amqp_host }}:5672 + +# Connect over SSL for RabbitMQ. (boolean value) +rabbit_use_ssl=false + +# The RabbitMQ userid. (string value) +rabbit_userid={{ amqp_user }} + +# The RabbitMQ password. (string value) +rabbit_password={{ amqp_pass }} + +# the RabbitMQ login method (string value) +#rabbit_login_method=AMQPLAIN + +# The RabbitMQ virtual host. (string value) +rabbit_virtual_host=/ + +# How frequently to retry connecting with RabbitMQ. (integer +# value) +rabbit_retry_interval=1 + +# How long to backoff for between retries when connecting to +# RabbitMQ. 
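+# The broker settings above assume a single RabbitMQ node; for a clustered
+# deployment the same options are typically written along these lines (the
+# host names below are placeholders, not part of this template):
+#   rabbit_hosts=rabbit1:5672,rabbit2:5672
+#   rabbit_ha_queues=true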
(integer value) +rabbit_retry_backoff=2 + +# Maximum number of RabbitMQ connection retries. Default is 0 +# (infinite retry count). (integer value) +rabbit_max_retries=0 + +# Use HA queues in RabbitMQ (x-ha-policy: all). If you change +# this option, you must wipe the RabbitMQ database. (boolean +# value) +rabbit_ha_queues=false + +# If passed, use a fake RabbitMQ provider. (boolean value) +#fake_rabbit=false + +# ZeroMQ bind address. Should be a wildcard (*), an ethernet +# interface, or IP. The "host" option should point or resolve +# to this address. (string value) +#rpc_zmq_bind_address=* + +# MatchMaker driver. (string value) +#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost + +# ZeroMQ receiver listening port. (integer value) +#rpc_zmq_port=9501 + +# Number of ZeroMQ contexts, defaults to 1. (integer value) +#rpc_zmq_contexts=1 + +# Maximum number of ingress messages to locally buffer per +# topic. Default is unlimited. (integer value) +#rpc_zmq_topic_backlog= + +# Directory for holding IPC sockets. (string value) +#rpc_zmq_ipc_dir=/var/run/openstack + +# Name of this node. Must be a valid hostname, FQDN, or IP +# address. Must match "host" option, if running Nova. (string +# value) +#rpc_zmq_host=nova + +# Seconds to wait before a cast expires (TTL). Only supported +# by impl_zmq. (integer value) +#rpc_cast_timeout=30 + +# Heartbeat frequency. (integer value) +#matchmaker_heartbeat_freq=300 + +# Heartbeat time-to-live. (integer value) +#matchmaker_heartbeat_ttl=600 + +# Host to locate redis. (string value) +#host=127.0.0.1 + +# Use this port to connect to redis host. (integer value) +#port=6379 + +# Password for Redis server (optional). (string value) +#password= + +# Size of RPC greenthread pool. (integer value) +#rpc_thread_pool_size=64 + +# Driver or drivers to handle sending notifications. (multi +# valued) +#notification_driver= + +# AMQP topic used for OpenStack notifications. (list value) +# Deprecated group;name - [rpc_notifier2]/topics +#notification_topics=notifications + +# Seconds to wait for a response from a call. (integer value) +#rpc_response_timeout=60 + +# A URL representing the messaging driver to use and its full +# configuration. If not set, we fall back to the rpc_backend +# option and driver specific configuration. (string value) +#transport_url= + +# The messaging driver to use, defaults to rabbit. Other +# drivers include qpid and zmq. (string value) +rpc_backend=nova.openstack.common.rpc.impl_kombu + +# The default exchange under which topics are scoped. May be +# overridden by an exchange name specified in the +# transport_url option. (string value) +#control_exchange=openstack + + +# +# Options defined in nova.availability_zones +# + +# The availability_zone to show internal services under +# (string value) +#internal_service_availability_zone=internal + +# Default compute node availability_zone (string value) +#default_availability_zone=nova + + +# +# Options defined in nova.crypto +# + +# Filename of root CA (string value) +#ca_file=cacert.pem + +# Filename of private key (string value) +#key_file=private/cakey.pem + +# Filename of root Certificate Revocation List (string value) +#crl_file=crl.pem + +# Where we keep our keys (string value) +#keys_path=$state_path/keys + +# Where we keep our root CA (string value) +#ca_path=$state_path/CA + +# Should we use a CA for each project? 
(boolean value) +#use_project_ca=false + +# Subject for certificate for users, %s for project, user, +# timestamp (string value) +#user_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=%.16s-%.16s-%s + +# Subject for certificate for projects, %s for project, +# timestamp (string value) +#project_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=project-ca-%.16s-%s + + +# +# Options defined in nova.exception +# + +# Make exception message format errors fatal (boolean value) +#fatal_exception_format_errors=false + + +# +# Options defined in nova.netconf +# + +# IP address of this host (string value) +my_ip={{ my_int_ip }} + +# Name of this node. This can be an opaque identifier. It is +# not necessarily a hostname, FQDN, or IP address. However, +# the node name must be valid within an AMQP key, and if using +# ZeroMQ, a valid hostname, FQDN, or IP address (string value) +#host=nova + +# Use IPv6 (boolean value) +#use_ipv6=false + + +# +# Options defined in nova.notifications +# + +# If set, send compute.instance.update notifications on +# instance state changes. Valid values are None for no +# notifications, "vm_state" for notifications on VM state +# changes, or "vm_and_task_state" for notifications on VM and +# task state changes. (string value) +#notify_on_state_change= + +# If set, send api.fault notifications on caught exceptions in +# the API service. (boolean value) +#notify_api_faults=false + +# Default notification level for outgoing notifications +# (string value) +#default_notification_level=INFO + +# Default publisher_id for outgoing notifications (string +# value) +#default_publisher_id= + + +# +# Options defined in nova.paths +# + +# Directory where the nova python module is installed (string +# value) +#pybasedir=/usr/lib/python/site-packages + +# Directory where nova binaries are installed (string value) +#bindir=/usr/local/bin + +# Top-level directory for maintaining nova's state (string +# value) +#state_path=/var/lib/nova + + +# +# Options defined in nova.policy +# + +# JSON file representing policy (string value) +#policy_file=policy.json + +# Rule checked when requested rule is not found (string value) +#policy_default_rule=default + + +# +# Options defined in nova.quota +# + +# Number of instances allowed per project (integer value) +#quota_instances=10 + +# Number of instance cores allowed per project (integer value) +#quota_cores=20 + +# Megabytes of instance RAM allowed per project (integer +# value) +#quota_ram=51200 + +# Number of floating IPs allowed per project (integer value) +#quota_floating_ips=10 + +# Number of fixed IPs allowed per project (this should be at +# least the number of instances allowed) (integer value) +#quota_fixed_ips=-1 + +# Number of metadata items allowed per instance (integer +# value) +#quota_metadata_items=128 + +# Number of injected files allowed (integer value) +#quota_injected_files=5 + +# Number of bytes allowed per injected file (integer value) +#quota_injected_file_content_bytes=10240 + +# Number of bytes allowed per injected file path (integer +# value) +#quota_injected_file_path_bytes=255 + +# Number of security groups per project (integer value) +#quota_security_groups=10 + +# Number of security rules per security group (integer value) +#quota_security_group_rules=20 + +# Number of key pairs per user (integer value) +#quota_key_pairs=100 + +# Number of seconds until a reservation expires (integer +# value) +#reservation_expire=86400 + +# Count of reservations until usage is refreshed (integer +# value) 
+#until_refresh=0 + +# Number of seconds between subsequent usage refreshes +# (integer value) +#max_age=0 + +# Default driver to use for quota checks (string value) +#quota_driver=nova.quota.DbQuotaDriver + + +# +# Options defined in nova.service +# + +# Seconds between nodes reporting state to datastore (integer +# value) +#report_interval=10 + +# Enable periodic tasks (boolean value) +#periodic_enable=true + +# Range of seconds to randomly delay when starting the +# periodic task scheduler to reduce stampeding. (Disable by +# setting to 0) (integer value) +#periodic_fuzzy_delay=60 + +# A list of APIs to enable by default (list value) +#enabled_apis=ec2,osapi_compute,metadata + +# A list of APIs with enabled SSL (list value) +#enabled_ssl_apis= + +# The IP address on which the EC2 API will listen. (string +# value) +#ec2_listen=0.0.0.0 + +# The port on which the EC2 API will listen. (integer value) +#ec2_listen_port=8773 + +# Number of workers for EC2 API service. The default will be +# equal to the number of CPUs available. (integer value) +#ec2_workers= + +# The IP address on which the OpenStack API will listen. +# (string value) +#osapi_compute_listen=0.0.0.0 + +# The port on which the OpenStack API will listen. (integer +# value) +#osapi_compute_listen_port=8774 + +# Number of workers for OpenStack API service. The default +# will be the number of CPUs available. (integer value) +#osapi_compute_workers= + +# OpenStack metadata service manager (string value) +#metadata_manager=nova.api.manager.MetadataManager + +# The IP address on which the metadata API will listen. +# (string value) +metadata_listen=0.0.0.0 + +# The port on which the metadata API will listen. (integer +# value) +#metadata_listen_port=8775 + +# Number of workers for metadata service. The default will be +# the number of CPUs available. (integer value) +#metadata_workers= + +# Full class name for the Manager for compute (string value) +#compute_manager=nova.compute.manager.ComputeManager + +# Full class name for the Manager for console proxy (string +# value) +#console_manager=nova.console.manager.ConsoleProxyManager + +# Manager for console auth (string value) +#consoleauth_manager=nova.consoleauth.manager.ConsoleAuthManager + +# Full class name for the Manager for cert (string value) +#cert_manager=nova.cert.manager.CertManager + +# Full class name for the Manager for network (string value) +#network_manager=nova.network.manager.FlatDHCPManager + +# Full class name for the Manager for scheduler (string value) +#scheduler_manager=nova.scheduler.manager.SchedulerManager + +# Maximum time since last check-in for up service (integer +# value) +#service_down_time=60 + + +# +# Options defined in nova.test +# + +# File name of clean sqlite db (string value) +#sqlite_clean_db=clean.sqlite + + +# +# Options defined in nova.utils +# + +# Whether to log monkey patching (boolean value) +#monkey_patch=false + +# List of modules/decorators to monkey patch (list value) +#monkey_patch_modules=nova.api.ec2.cloud:nova.notifications.notify_decorator,nova.compute.api:nova.notifications.notify_decorator + +# Length of generated instance admin passwords (integer value) +#password_length=12 + +# Time period to generate instance usages for. 
Time period +# must be hour, day, month or year (string value) +#instance_usage_audit_period=month + +# Path to the rootwrap configuration file to use for running +# commands as root (string value) +#rootwrap_config=/etc/nova/rootwrap.conf + +# Explicitly specify the temporary working directory (string +# value) +#tempdir= + + +# +# Options defined in nova.wsgi +# + +# File name for the paste.deploy config for nova-api (string +# value) +#api_paste_config=api-paste.ini + +# A python format string that is used as the template to +# generate log lines. The following values can be formatted +# into it: client_ip, date_time, request_line, status_code, +# body_length, wall_seconds. (string value) +#wsgi_log_format=%(client_ip)s "%(request_line)s" status: %(status_code)s len: %(body_length)s time: %(wall_seconds).7f + +# CA certificate file to use to verify connecting clients +# (string value) +#ssl_ca_file= + +# SSL certificate of API server (string value) +#ssl_cert_file= + +# SSL private key of API server (string value) +#ssl_key_file= + +# Sets the value of TCP_KEEPIDLE in seconds for each server +# socket. Not supported on OS X. (integer value) +#tcp_keepidle=600 + +# Size of the pool of greenthreads used by wsgi (integer +# value) +#wsgi_default_pool_size=1000 + +# Maximum line size of message headers to be accepted. +# max_header_line may need to be increased when using large +# tokens (typically those generated by the Keystone v3 API +# with big service catalogs). (integer value) +#max_header_line=16384 + + +# +# Options defined in nova.api.auth +# + +# Whether to use per-user rate limiting for the api. This +# option is only used by v2 api. Rate limiting is removed from +# v3 api. (boolean value) +#api_rate_limit=false + +# The strategy to use for auth: noauth or keystone. (string +# value) +auth_strategy=keystone + +# Treat X-Forwarded-For as the canonical remote address. Only +# enable this if you have a sanitizing proxy. (boolean value) +#use_forwarded_for=false + + +# +# Options defined in nova.api.ec2 +# + +# Number of failed auths before lockout. (integer value) +#lockout_attempts=5 + +# Number of minutes to lockout if triggered. (integer value) +#lockout_minutes=15 + +# Number of minutes for lockout window. (integer value) +#lockout_window=15 + +# URL to get token from ec2 request. 
(string value) +#keystone_ec2_url=http://localhost:5000/v2.0/ec2tokens + +# Return the IP address as private dns hostname in describe +# instances (boolean value) +#ec2_private_dns_show_ip=false + +# Validate security group names according to EC2 specification +# (boolean value) +#ec2_strict_validation=true + +# Time in seconds before ec2 timestamp expires (integer value) +#ec2_timestamp_expiry=300 + + +# +# Options defined in nova.api.ec2.cloud +# + +# The IP address of the EC2 API server (string value) +#ec2_host=$my_ip + +# The internal IP address of the EC2 API server (string value) +#ec2_dmz_host=$my_ip + +# The port of the EC2 API server (integer value) +#ec2_port=8773 + +# The protocol to use when connecting to the EC2 API server +# (http, https) (string value) +#ec2_scheme=http + +# The path prefix used to call the ec2 API server (string +# value) +#ec2_path=/services/Cloud + +# List of region=fqdn pairs separated by commas (list value) +#region_list= + + +# +# Options defined in nova.api.metadata.base +# + +# List of metadata versions to skip placing into the config +# drive (string value) +#config_drive_skip_versions=1.0 2007-01-19 2007-03-01 2007-08-29 2007-10-10 2007-12-15 2008-02-01 2008-09-01 + +# Driver to use for vendor data (string value) +#vendordata_driver=nova.api.metadata.vendordata_json.JsonFileVendorData + + +# +# Options defined in nova.api.metadata.handler +# + +# Set flag to indicate Neutron will proxy metadata requests +# and resolve instance ids. (boolean value) +service_neutron_metadata_proxy=True + +# Shared secret to validate proxies Neutron metadata requests +# (string value) +#neutron_metadata_proxy_shared_secret= + + +# +# Options defined in nova.api.metadata.vendordata_json +# + +# File to load json formatted vendor data from (string value) +#vendordata_jsonfile_path= + + +# +# Options defined in nova.api.openstack.common +# + +# The maximum number of items returned in a single response +# from a collection resource (integer value) +#osapi_max_limit=1000 + +# Base URL that will be presented to users in links to the +# OpenStack Compute API (string value) +#osapi_compute_link_prefix= + +# Base URL that will be presented to users in links to glance +# resources (string value) +#osapi_glance_link_prefix= + + +# +# Options defined in nova.api.openstack.compute +# + +# Permit instance snapshot operations. (boolean value) +#allow_instance_snapshots=true + + +# +# Options defined in nova.api.openstack.compute.contrib +# + +# Specify list of extensions to load when using +# osapi_compute_extension option with +# nova.api.openstack.compute.contrib.select_extensions (list +# value) +#osapi_compute_ext_list= + + +# +# Options defined in nova.api.openstack.compute.contrib.fping +# + +# Full path to fping. 
(string value) +#fping_path=/usr/sbin/fping + + +# +# Options defined in nova.api.openstack.compute.contrib.os_tenant_networks +# + +# Enables or disables quota checking for tenant networks +# (boolean value) +#enable_network_quota=false + +# Control for checking for default networks (string value) +#use_neutron_default_nets=False + +# Default tenant id when creating neutron networks (string +# value) +#neutron_default_tenant_id=default + + +# +# Options defined in nova.api.openstack.compute.extensions +# + +# osapi compute extension to load (multi valued) +#osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions + + +# +# Options defined in nova.api.openstack.compute.plugins.v3.hide_server_addresses +# + +# List of instance states that should hide network info (list +# value) +#osapi_hide_server_address_states=building + + +# +# Options defined in nova.api.openstack.compute.servers +# + +# Enables returning of the instance password by the relevant +# server API calls such as create, rebuild or rescue, If the +# hypervisor does not support password injection then the +# password returned will not be correct (boolean value) +#enable_instance_password=true + + +# +# Options defined in nova.api.sizelimit +# + +# The maximum body size per each osapi request(bytes) (integer +# value) +#osapi_max_request_body_size=114688 + + +# +# Options defined in nova.cert.rpcapi +# + +# The topic cert nodes listen on (string value) +#cert_topic=cert + + +# +# Options defined in nova.cloudpipe.pipelib +# + +# Image ID used when starting up a cloudpipe vpn server +# (string value) +#vpn_image_id=0 + +# Flavor for vpn instances (string value) +#vpn_flavor=m1.tiny + +# Template for cloudpipe instance boot script (string value) +#boot_script_template=$pybasedir/nova/cloudpipe/bootscript.template + +# Network to push into openvpn config (string value) +#dmz_net=10.0.0.0 + +# Netmask to push into openvpn config (string value) +#dmz_mask=255.255.255.0 + +# Suffix to add to project name for vpn key and secgroups +# (string value) +#vpn_key_suffix=-vpn + + +# +# Options defined in nova.cmd.novnc +# + +# Record sessions to FILE.[session_number] (boolean value) +#record=false + +# Become a daemon (background process) (boolean value) +#daemon=false + +# Disallow non-encrypted connections (boolean value) +#ssl_only=false + +# Source is ipv6 (boolean value) +#source_is_ipv6=false + +# SSL certificate file (string value) +#cert=self.pem + +# SSL key file (if separate from cert) (string value) +#key= + +# Run webserver on same port. Serve files from DIR. (string +# value) +#web=/usr/share/spice-html5 + + +# +# Options defined in nova.cmd.novncproxy +# + +# Host on which to listen for incoming requests (string value) +novncproxy_host={{ vnc_host }} + +# Port on which to listen for incoming requests (integer +# value) +#novncproxy_port=6080 + + +# +# Options defined in nova.cmd.spicehtml5proxy +# + +# Host on which to listen for incoming requests (string value) +#spicehtml5proxy_host=0.0.0.0 + +# Port on which to listen for incoming requests (integer +# value) +#spicehtml5proxy_port=6082 + + +# +# Options defined in nova.compute.api +# + +# Allow destination machine to match source for resize. Useful +# when testing in single-host environments. (boolean value) +#allow_resize_to_same_host=false + +# Allow migrate machine to the same host. Useful when testing +# in single-host environments. 
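+# On an all-in-one test host both of the "same host" switches in this block
+# are commonly turned on; an illustrative sketch only, not a production
+# recommendation:
+#   allow_resize_to_same_host=true
+#   allow_migrate_to_same_host=true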
(boolean value) +#allow_migrate_to_same_host=false + +# Availability zone to use when user doesn't specify one +# (string value) +#default_schedule_zone= + +# These are image properties which a snapshot should not +# inherit from an instance (list value) +#non_inheritable_image_properties=cache_in_nova,bittorrent + +# Kernel image that indicates not to use a kernel, but to use +# a raw disk image instead (string value) +#null_kernel=nokernel + +# When creating multiple instances with a single request using +# the os-multiple-create API extension, this template will be +# used to build the display name for each instance. The +# benefit is that the instances end up with different +# hostnames. To restore legacy behavior of every instance +# having the same name, set this option to "%(name)s". Valid +# keys for the template are: name, uuid, count. (string value) +#multi_instance_display_name_template=%(name)s-%(uuid)s + +# Maximum number of devices that will result in a local image +# being created on the hypervisor node. Setting this to 0 +# means nova will allow only boot from volume. A negative +# number means unlimited. (integer value) +#max_local_block_devices=3 + + +# +# Options defined in nova.compute.flavors +# + +# Default flavor to use for the EC2 API only. The Nova API +# does not support a default flavor. (string value) +#default_flavor=m1.small + + +# +# Options defined in nova.compute.manager +# + +# Console proxy host to use to connect to instances on this +# host. (string value) +#console_host=nova + +# Name of network to use to set access IPs for instances +# (string value) +#default_access_ip_network_name= + +# Whether to batch up the application of IPTables rules during +# a host restart and apply all at the end of the init phase +# (boolean value) +#defer_iptables_apply=false + +# Where instances are stored on disk (string value) +#instances_path=$state_path/instances + +# Generate periodic compute.instance.exists notifications +# (boolean value) +#instance_usage_audit=false + +# Number of 1 second retries needed in live_migration (integer +# value) +#live_migration_retry_count=30 + +# Whether to start guests that were running before the host +# rebooted (boolean value) +#resume_guests_state_on_host_boot=false + +# Number of times to retry network allocation on failures +# (integer value) +#network_allocate_retries=0 + +# The number of times to attempt to reap an instance's files. +# (integer value) +#maximum_instance_delete_attempts=5 + +# Interval to pull network bandwidth usage info. Not supported +# on all hypervisors. Set to 0 to disable. (integer value) +#bandwidth_poll_interval=600 + +# Interval to sync power states between the database and the +# hypervisor (integer value) +#sync_power_state_interval=600 + +# Number of seconds between instance info_cache self healing +# updates (integer value) +#heal_instance_info_cache_interval=60 + +# Interval in seconds for reclaiming deleted instances +# (integer value) +#reclaim_instance_interval=0 + +# Interval in seconds for gathering volume usages (integer +# value) +#volume_usage_poll_interval=0 + +# Interval in seconds for polling shelved instances to offload +# (integer value) +#shelved_poll_interval=3600 + +# Time in seconds before a shelved instance is eligible for +# removing from a host. 
-1 never offload, 0 offload when +# shelved (integer value) +#shelved_offload_time=0 + +# Interval in seconds for retrying failed instance file +# deletes (integer value) +#instance_delete_interval=300 + +# Action to take if a running deleted instance is +# detected.Valid options are 'noop', 'log', 'shutdown', or +# 'reap'. Set to 'noop' to take no action. (string value) +#running_deleted_instance_action=reap + +# Number of seconds to wait between runs of the cleanup task. +# (integer value) +#running_deleted_instance_poll_interval=1800 + +# Number of seconds after being deleted when a running +# instance should be considered eligible for cleanup. (integer +# value) +#running_deleted_instance_timeout=0 + +# Automatically hard reboot an instance if it has been stuck +# in a rebooting state longer than N seconds. Set to 0 to +# disable. (integer value) +#reboot_timeout=0 + +# Amount of time in seconds an instance can be in BUILD before +# going into ERROR status.Set to 0 to disable. (integer value) +#instance_build_timeout=0 + +# Automatically unrescue an instance after N seconds. Set to 0 +# to disable. (integer value) +#rescue_timeout=0 + +# Automatically confirm resizes after N seconds. Set to 0 to +# disable. (integer value) +#resize_confirm_window=0 + + +# +# Options defined in nova.compute.monitors +# + +# Monitor classes available to the compute which may be +# specified more than once. (multi valued) +#compute_available_monitors=nova.compute.monitors.all_monitors + +# A list of monitors that can be used for getting compute +# metrics. (list value) +#compute_monitors= + + +# +# Options defined in nova.compute.resource_tracker +# + +# Amount of disk in MB to reserve for the host (integer value) +#reserved_host_disk_mb=0 + +# Amount of memory in MB to reserve for the host (integer +# value) +#reserved_host_memory_mb=512 + +# Class that will manage stats for the local compute host +# (string value) +#compute_stats_class=nova.compute.stats.Stats + + +# +# Options defined in nova.compute.rpcapi +# + +# The topic compute nodes listen on (string value) +#compute_topic=compute + + +# +# Options defined in nova.conductor.tasks.live_migrate +# + +# Number of times to retry live-migration before failing. If +# == -1, try until out of hosts. If == 0, only try once, no +# retries. 
(integer value) +#migrate_max_retries=-1 + + +# +# Options defined in nova.console.manager +# + +# Driver to use for the console proxy (string value) +#console_driver=nova.console.xvp.XVPConsoleProxy + +# Stub calls to compute worker for tests (boolean value) +#stub_compute=false + +# Publicly visible name for this console host (string value) +#console_public_hostname=nova + + +# +# Options defined in nova.console.rpcapi +# + +# The topic console proxy nodes listen on (string value) +#console_topic=console + + +# +# Options defined in nova.console.vmrc +# + +# Port for VMware VMRC connections (integer value) +#console_vmrc_port=443 + +# Number of retries for retrieving VMRC information (integer +# value) +#console_vmrc_error_retries=10 + + +# +# Options defined in nova.console.xvp +# + +# XVP conf template (string value) +#console_xvp_conf_template=$pybasedir/nova/console/xvp.conf.template + +# Generated XVP conf file (string value) +#console_xvp_conf=/etc/xvp.conf + +# XVP master process pid file (string value) +#console_xvp_pid=/var/run/xvp.pid + +# XVP log file (string value) +#console_xvp_log=/var/log/xvp.log + +# Port for XVP to multiplex VNC connections on (integer value) +#console_xvp_multiplex_port=5900 + + +# +# Options defined in nova.consoleauth +# + +# The topic console auth proxy nodes listen on (string value) +#consoleauth_topic=consoleauth + + +# +# Options defined in nova.consoleauth.manager +# + +# How many seconds before deleting tokens (integer value) +#console_token_ttl=600 + + +# +# Options defined in nova.db.api +# + +# Services to be added to the available pool on create +# (boolean value) +#enable_new_services=true + +# Template string to be used to generate instance names +# (string value) +#instance_name_template=instance-%08x + +# Template string to be used to generate snapshot names +# (string value) +#snapshot_name_template=snapshot-%s + + +# +# Options defined in nova.db.base +# + +# The driver to use for database access (string value) +#db_driver=nova.db + + +# +# Options defined in nova.db.sqlalchemy.api +# + +# When set, compute API will consider duplicate hostnames +# invalid within the specified scope, regardless of case. +# Should be empty, "project" or "global". (string value) +#osapi_compute_unique_server_name_scope= + + +# +# Options defined in nova.image.glance +# + +# Default glance hostname or IP address (string value) +#glance_host=$my_ip + +# Default glance port (integer value) +#glance_port=9292 + +# Default protocol to use when connecting to glance. Set to +# https for SSL. (string value) +#glance_protocol=http + +# A list of the glance api servers available to nova. Prefix +# with https:// for ssl-based glance api servers. +# ([hostname|ip]:port) (list value) +glance_api_servers={{ frontend_int_ip }}:9292 + +# Allow to perform insecure SSL (https) requests to glance +# (boolean value) +#glance_api_insecure=false + +# Number of retries when downloading an image from glance +# (integer value) +#glance_num_retries=0 + +# A list of url scheme that can be downloaded directly via the +# direct_url. Currently supported schemes: [file]. 
(list +# value) +#allowed_direct_url_schemes= + + +# +# Options defined in nova.image.s3 +# + +# Parent directory for tempdir used for image decryption +# (string value) +#image_decryption_dir=/tmp + +# Hostname or IP for OpenStack to use when accessing the S3 +# api (string value) +#s3_host=$my_ip + +# Port used when accessing the S3 api (integer value) +#s3_port=3333 + +# Access key to use for S3 server for images (string value) +#s3_access_key=notchecked + +# Secret key to use for S3 server for images (string value) +#s3_secret_key=notchecked + +# Whether to use SSL when talking to S3 (boolean value) +#s3_use_ssl=false + +# Whether to affix the tenant id to the access key when +# downloading from S3 (boolean value) +#s3_affix_tenant=false + + +# +# Options defined in nova.ipv6.api +# + +# Backend to use for IPv6 generation (string value) +#ipv6_backend=rfc2462 + + +# +# Options defined in nova.network +# + +# The full class name of the network API class to use (string +# value) +network_api_class=nova.network.neutronv2.api.API + +# +# Options defined in nova.network.driver +# + +# Driver to use for network creation (string value) +#network_driver=nova.network.linux_net + + +# +# Options defined in nova.network.floating_ips +# + +# Default pool for floating IPs (string value) +#default_floating_pool=nova + +# Autoassigning floating IP to VM (boolean value) +#auto_assign_floating_ip=false + +# Full class name for the DNS Manager for floating IPs (string +# value) +#floating_ip_dns_manager=nova.network.noop_dns_driver.NoopDNSDriver + +# Full class name for the DNS Manager for instance IPs (string +# value) +#instance_dns_manager=nova.network.noop_dns_driver.NoopDNSDriver + +# Full class name for the DNS Zone for instance IPs (string +# value) +#instance_dns_domain= + + +# +# Options defined in nova.network.ldapdns +# + +# URL for LDAP server which will store DNS entries (string +# value) +#ldap_dns_url=ldap://ldap.example.com:389 + +# User for LDAP DNS (string value) +#ldap_dns_user=uid=admin,ou=people,dc=example,dc=org + +# Password for LDAP DNS (string value) +#ldap_dns_password=password + +# Hostmaster for LDAP DNS driver Statement of Authority +# (string value) +#ldap_dns_soa_hostmaster=hostmaster@example.org + +# DNS Servers for LDAP DNS driver (multi valued) +#ldap_dns_servers=dns.example.org + +# Base DN for DNS entries in LDAP (string value) +#ldap_dns_base_dn=ou=hosts,dc=example,dc=org + +# Refresh interval (in seconds) for LDAP DNS driver Statement +# of Authority (string value) +#ldap_dns_soa_refresh=1800 + +# Retry interval (in seconds) for LDAP DNS driver Statement of +# Authority (string value) +#ldap_dns_soa_retry=3600 + +# Expiry interval (in seconds) for LDAP DNS driver Statement +# of Authority (string value) +#ldap_dns_soa_expiry=86400 + +# Minimum interval (in seconds) for LDAP DNS driver Statement +# of Authority (string value) +#ldap_dns_soa_minimum=7200 + + +# +# Options defined in nova.network.linux_net +# + +# Location of flagfiles for dhcpbridge (multi valued) +#dhcpbridge_flagfile=/etc/nova/nova.conf + +# Location to keep network config files (string value) +#networks_path=$state_path/networks + +# Interface for public IP addresses (string value) +#public_interface=eth0 + +# MTU setting for network interface (integer value) +#network_device_mtu= + +# Location of nova-dhcpbridge (string value) +#dhcpbridge=/usr/bin/nova-dhcpbridge + +# Public IP of network host (string value) +#routing_source_ip=$my_ip + +# Lifetime of a DHCP lease in seconds (integer value) 
+#dhcp_lease_time=120 + +# If set, uses specific DNS server for dnsmasq. Can be +# specified multiple times. (multi valued) +#dns_server= + +# If set, uses the dns1 and dns2 from the network ref. as dns +# servers. (boolean value) +#use_network_dns_servers=false + +# A list of dmz range that should be accepted (list value) +#dmz_cidr= + +# Traffic to this range will always be snatted to the fallback +# ip, even if it would normally be bridged out of the node. +# Can be specified multiple times. (multi valued) +#force_snat_range= + +# Override the default dnsmasq settings with this file (string +# value) +#dnsmasq_config_file= + +# Driver used to create ethernet devices. (string value) +linuxnet_interface_driver=nova.network.linux_net.LinuxBridgeInterfaceDriver + +# Name of Open vSwitch bridge used with linuxnet (string +# value) +#linuxnet_ovs_integration_bridge=br-int + +# Send gratuitous ARPs for HA setup (boolean value) +#send_arp_for_ha=false + +# Send this many gratuitous ARPs for HA setup (integer value) +#send_arp_for_ha_count=3 + +# Use single default gateway. Only first nic of vm will get +# default gateway from dhcp server (boolean value) +#use_single_default_gateway=false + +# An interface that bridges can forward to. If this is set to +# all then all traffic will be forwarded. Can be specified +# multiple times. (multi valued) +#forward_bridge_interface=all + +# The IP address for the metadata API server (string value) +metadata_host={{ nova_metadata_host }} + +# The port for the metadata API port (integer value) +#metadata_port=8775 + +# Regular expression to match iptables rule that should always +# be on the top. (string value) +#iptables_top_regex= + +# Regular expression to match iptables rule that should always +# be on the bottom. (string value) +#iptables_bottom_regex= + +# The table that iptables to jump to when a packet is to be +# dropped. (string value) +#iptables_drop_action=DROP + +# Amount of time, in seconds, that ovs_vsctl should wait for a +# response from the database. 0 is to wait forever. 
(integer +# value) +#ovs_vsctl_timeout=120 + +# If passed, use fake network devices and addresses (boolean +# value) +#fake_network=false + + +# +# Options defined in nova.network.manager +# + +# Bridge for simple network instances (string value) +#flat_network_bridge= + +# DNS server for simple network (string value) +#flat_network_dns=8.8.4.4 + +# Whether to attempt to inject network setup into guest +# (boolean value) +#flat_injected=false + +# FlatDhcp will bridge into this interface if set (string +# value) +#flat_interface= + +# First VLAN for private networks (integer value) +#vlan_start=100 + +# VLANs will bridge into this interface if set (string value) +#vlan_interface= + +# Number of networks to support (integer value) +#num_networks=1 + +# Public IP for the cloudpipe VPN servers (string value) +#vpn_ip=$my_ip + +# First Vpn port for private networks (integer value) +#vpn_start=1000 + +# Number of addresses in each private subnet (integer value) +#network_size=256 + +# Fixed IPv6 address block (string value) +#fixed_range_v6=fd00::/48 + +# Default IPv4 gateway (string value) +#gateway= + +# Default IPv6 gateway (string value) +#gateway_v6= + +# Number of addresses reserved for vpn clients (integer value) +#cnt_vpn_clients=0 + +# Seconds after which a deallocated IP is disassociated +# (integer value) +#fixed_ip_disassociate_timeout=600 + +# Number of attempts to create unique mac address (integer +# value) +#create_unique_mac_address_attempts=5 + +# If True, skip using the queue and make local calls (boolean +# value) +#fake_call=false + +# If True, unused gateway devices (VLAN and bridge) are +# deleted in VLAN network mode with multi hosted networks +# (boolean value) +#teardown_unused_network_gateway=false + +# If True, send a dhcp release on instance termination +# (boolean value) +#force_dhcp_release=True + +# If True in multi_host mode, all compute hosts share the same +# dhcp address. The same IP address used for DHCP will be +# added on each nova-network node which is only visible to the +# vms on the same host. (boolean value) +#share_dhcp_address=false + +# If True, when a DNS entry must be updated, it sends a fanout +# cast to all network hosts to update their DNS entries in +# multi host mode (boolean value) +#update_dns_entries=false + +# Number of seconds to wait between runs of updates to DNS +# entries. (integer value) +#dns_update_periodic_interval=-1 + +# Domain to use for building the hostnames (string value) +#dhcp_domain=novalocal + +# Indicates underlying L3 management library (string value) +#l3_lib=nova.network.l3.LinuxNetL3 + + +# +# Options defined in nova.network.neutronv2.api +# + +# URL for connecting to neutron (string value) +neutron_url={{ neutron_internal_url }} + +# Timeout value for connecting to neutron in seconds (integer +# value) +#neutron_url_timeout=30 + +# Username for connecting to neutron in admin context (string +# value) +neutron_admin_username=neutron + +# Password for connecting to neutron in admin context (string +# value) +neutron_admin_password={{ neutron_identity_password }} + +# Tenant id for connecting to neutron in admin context (string +# value) +#neutron_admin_tenant_id= + +# Tenant name for connecting to neutron in admin context. This +# option is mutually exclusive with neutron_admin_tenant_id. +# Note that with Keystone V3 tenant names are only unique +# within a domain. 
(string value) +neutron_admin_tenant_name={{ service_tenant }} + +# Region name for connecting to neutron in admin context +# (string value) +neutron_region_name={{ openstack_region }} + +# Authorization URL for connecting to neutron in admin context +# (string value) +neutron_admin_auth_url={{ keystone_internal_url }} + +# If set, ignore any SSL validation issues (boolean value) +#neutron_api_insecure=false + +# Authorization strategy for connecting to neutron in admin +# context (string value) +neutron_auth_strategy=keystone + +# Name of Integration Bridge used by Open vSwitch (string +# value) +#neutron_ovs_bridge=br-int + +# Number of seconds before querying neutron for extensions +# (integer value) +#neutron_extension_sync_interval=600 + +# Location of CA certificates file to use for neutron client +# requests. (string value) +#neutron_ca_certificates_file= + + +# +# Options defined in nova.network.rpcapi +# + +# The topic network nodes listen on (string value) +#network_topic=network + +# Default value for multi_host in networks. Also, if set, some +# rpc network calls will be sent directly to host. (boolean +# value) +#multi_host=false + + +# +# Options defined in nova.network.security_group.openstack_driver +# + +# The full class name of the security API class (string value) +security_group_api=neutron + + +# +# Options defined in nova.objectstore.s3server +# + +# Path to S3 buckets (string value) +#buckets_path=$state_path/buckets + +# IP address for S3 API to listen (string value) +#s3_listen=0.0.0.0 + +# Port for S3 API to listen (integer value) +#s3_listen_port=3333 + + +# +# Options defined in nova.openstack.common.eventlet_backdoor +# + +# Enable eventlet backdoor. Acceptable values are 0, +# and :, where 0 results in listening on a random +# tcp port number, results in listening on the +# specified port number and not enabling backdoorif it is in +# use and : results in listening on the smallest +# unused port number within the specified range of port +# numbers. The chosen port is displayed in the service's log +# file. (string value) +#backdoor_port= + + +# +# Options defined in nova.openstack.common.lockutils +# + +# Whether to disable inter-process locks (boolean value) +#disable_process_locking=false + +# Directory to use for lock files. (string value) +#lock_path=/var/lib/nova/tmp + + +# +# Options defined in nova.openstack.common.log +# + +# Print debugging output (set logging level to DEBUG instead +# of default WARNING level). (boolean value) +debug={{ log_debug }} + +# Print more verbose output (set logging level to INFO instead +# of default WARNING level). 
(boolean value) +verbose={{ log_verbose }} + +# Log output to standard error (boolean value) +#use_stderr=False + +# format string to use for log messages with context (string +# value) +#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user)s %(tenant)s] %(instance)s%(message)s + +# format string to use for log messages without context +# (string value) +#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s + +# data to append to log format when level is DEBUG (string +# value) +#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d + +# prefix each line of exception output with this format +# (string value) +#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s + +# list of logger=LEVEL pairs (list value) +#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN + +# publish error events (boolean value) +#publish_errors=false + +# make deprecations fatal (boolean value) +#fatal_deprecations=false + +# If an instance is passed with the log message, format it +# like this (string value) +#instance_format="[instance: %(uuid)s] " + +# If an instance UUID is passed with the log message, format +# it like this (string value) +#instance_uuid_format="[instance: %(uuid)s] " + +# The name of logging configuration file. It does not disable +# existing loggers, but just appends specified logging +# configuration to any other existing logging options. Please +# see the Python logging module documentation for details on +# logging configuration files. (string value) +# Deprecated group;name - DEFAULT;log_config +#log_config_append= + +# DEPRECATED. A logging.Formatter log message format string +# which may use any of the available logging.LogRecord +# attributes. This option is deprecated. Please use +# logging_context_format_string and +# logging_default_format_string instead. (string value) +#log_format= + +# Format string for %%(asctime)s in log records. Default: +# %(default)s (string value) +#log_date_format=%Y-%m-%d %H:%M:%S + +# (Optional) Name of log file to output to. If no default is +# set, logging will go to stdout. (string value) +# Deprecated group;name - DEFAULT;logfile +#log_file= + +# (Optional) The base directory used for relative --log-file +# paths (string value) +# Deprecated group;name - DEFAULT;logdir +#log_dir=/var/log/nova + +# Use syslog for logging. Existing syslog format is DEPRECATED +# during I, and then will be changed in J to honor RFC5424 +# (boolean value) +#use_syslog=false + +# (Optional) Use syslog rfc5424 format for logging. If +# enabled, will add APP-NAME (RFC5424) before the MSG part of +# the syslog message. The old format without APP-NAME is +# deprecated in I, and will be removed in J. (boolean value) +#use_syslog_rfc_format=false + +# syslog facility to receive log lines (string value) +#syslog_log_facility=LOG_USER + + +# +# Options defined in nova.openstack.common.memorycache +# + +# Memcached servers or None for in process cache. (list value) +#memcached_servers= + + +# +# Options defined in nova.openstack.common.periodic_task +# + +# Some periodic tasks can be run in a separate process. Should +# we run them here? (boolean value) +#run_external_periodic_tasks=true + + +# +# Options defined in nova.pci.pci_request +# + +# An alias for a PCI passthrough device requirement. 
This +# allows users to specify the alias in the extra_spec for a +# flavor, without needing to repeat all the PCI property +# requirements. For example: pci_alias = { "name": +# "QuicAssist", "product_id": "0443", "vendor_id": "8086", +# "device_type": "ACCEL" } defines an alias for the Intel +# QuickAssist card. (multi valued) (multi valued) +#pci_alias= + + +# +# Options defined in nova.pci.pci_whitelist +# + +# White list of PCI devices available to VMs. For example: +# pci_passthrough_whitelist = [{"vendor_id": "8086", +# "product_id": "0443"}] (multi valued) +#pci_passthrough_whitelist= + + +# +# Options defined in nova.scheduler.driver +# + +# The scheduler host manager class to use (string value) +#scheduler_host_manager=nova.scheduler.host_manager.HostManager + +# Maximum number of attempts to schedule an instance (integer +# value) +#scheduler_max_attempts=3 + + +# +# Options defined in nova.scheduler.filter_scheduler +# + +# New instances will be scheduled on a host chosen randomly +# from a subset of the N best hosts. This property defines the +# subset size that a host is chosen from. A value of 1 chooses +# the first host returned by the weighing functions. This +# value must be at least 1. Any value less than 1 will be +# ignored, and 1 will be used instead (integer value) +#scheduler_host_subset_size=1 + + +# +# Options defined in nova.scheduler.filters.aggregate_image_properties_isolation +# + +# Force the filter to consider only keys matching the given +# namespace. (string value) +#aggregate_image_properties_isolation_namespace= + +# The separator used between the namespace and keys (string +# value) +#aggregate_image_properties_isolation_separator=. + + +# +# Options defined in nova.scheduler.filters.core_filter +# + +# Virtual CPU to physical CPU allocation ratio which affects +# all CPU filters. This configuration specifies a global ratio +# for CoreFilter. For AggregateCoreFilter, it will fall back +# to this configuration value if no per-aggregate setting +# found. (floating point value) +#cpu_allocation_ratio=16.0 + + +# +# Options defined in nova.scheduler.filters.disk_filter +# + +# Virtual disk to physical disk allocation ratio (floating +# point value) +#disk_allocation_ratio=1.0 + + +# +# Options defined in nova.scheduler.filters.io_ops_filter +# + +# Ignore hosts that have too many +# builds/resizes/snaps/migrations (integer value) +#max_io_ops_per_host=8 + + +# +# Options defined in nova.scheduler.filters.isolated_hosts_filter +# + +# Images to run on isolated host (list value) +#isolated_images= + +# Host reserved for specific images (list value) +#isolated_hosts= + +# Whether to force isolated hosts to run only isolated images +# (boolean value) +#restrict_isolated_hosts_to_isolated_images=true + + +# +# Options defined in nova.scheduler.filters.num_instances_filter +# + +# Ignore hosts that have too many instances (integer value) +#max_instances_per_host=50 + + +# +# Options defined in nova.scheduler.filters.ram_filter +# + +# Virtual ram to physical ram allocation ratio which affects +# all ram filters. This configuration specifies a global ratio +# for RamFilter. For AggregateRamFilter, it will fall back to +# this configuration value if no per-aggregate setting found. +# (floating point value) +#ram_allocation_ratio=1.5 + + +# +# Options defined in nova.scheduler.host_manager +# + +# Filter classes available to the scheduler which may be +# specified more than once. 
An entry of +# "nova.scheduler.filters.standard_filters" maps to all +# filters included with nova. (multi valued) +#scheduler_available_filters=nova.scheduler.filters.all_filters + +# Which filter class names to use for filtering hosts when not +# specified in the request. (list value) +#scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter + +# Which weight class names to use for weighing hosts (list +# value) +#scheduler_weight_classes=nova.scheduler.weights.all_weighers + + +# +# Options defined in nova.scheduler.manager +# + +# Default driver to use for the scheduler (string value) +#scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler + +# How often (in seconds) to run periodic tasks in the +# scheduler driver of your choice. Please note this is likely +# to interact with the value of service_down_time, but exactly +# how they interact will depend on your choice of scheduler +# driver. (integer value) +#scheduler_driver_task_period=60 + + +# +# Options defined in nova.scheduler.rpcapi +# + +# The topic scheduler nodes listen on (string value) +#scheduler_topic=scheduler + + +# +# Options defined in nova.scheduler.scheduler_options +# + +# Absolute path to scheduler configuration JSON file. (string +# value) +#scheduler_json_config_location= + + +# +# Options defined in nova.scheduler.weights.ram +# + +# Multiplier used for weighing ram. Negative numbers mean to +# stack vs spread. (floating point value) +#ram_weight_multiplier=1.0 + + +# +# Options defined in nova.servicegroup.api +# + +# The driver for servicegroup service (valid options are: db, +# zk, mc) (string value) +#servicegroup_driver=db + + +# +# Options defined in nova.virt.configdrive +# + +# Config drive format. One of iso9660 (default) or vfat +# (string value) +#config_drive_format=iso9660 + +# Where to put temporary files associated with config drive +# creation (string value) +#config_drive_tempdir= + +# Set to force injection to take place on a config drive (if +# set, valid options are: always) (string value) +force_config_drive=always + +# Name and optionally path of the tool used for ISO image +# creation (string value) +#mkisofs_cmd=genisoimage + + +# +# Options defined in nova.virt.cpu +# + +# Defines which pcpus that instance vcpus can use. For +# example, "4-12,^8,15" (string value) +#vcpu_pin_set= + + +# +# Options defined in nova.virt.disk.api +# + +# Template file for injected network (string value) +#injected_network_template=/usr/share/nova/interfaces.template + +# Name of the mkfs commands for ephemeral device. The format +# is = (multi valued) +#virt_mkfs= + +# Attempt to resize the filesystem by accessing the image over +# a block device. This is done by the host and may not be +# necessary if the image contains a recent version of cloud- +# init. Possible mechanisms require the nbd driver (for qcow +# and raw), or loop (for raw). (boolean value) +#resize_fs_using_block_device=false + + +# +# Options defined in nova.virt.disk.mount.nbd +# + +# Amount of time, in seconds, to wait for NBD device start up. +# (integer value) +#timeout_nbd=10 + + +# +# Options defined in nova.virt.driver +# + +# Driver to use for controlling virtualization. 
Options +# include: libvirt.LibvirtDriver, xenapi.XenAPIDriver, +# fake.FakeDriver, baremetal.BareMetalDriver, +# vmwareapi.VMwareESXDriver, vmwareapi.VMwareVCDriver (string +# value) +#compute_driver=libvirt.LibvirtDriver + +# The default format an ephemeral_volume will be formatted +# with on creation. (string value) +#default_ephemeral_format= + +# VM image preallocation mode: "none" => no storage +# provisioning is done up front, "space" => storage is fully +# allocated at instance start (string value) +#preallocate_images=none + +# Whether to use cow images (boolean value) +#use_cow_images=true + +# Fail instance boot if vif plugging fails (boolean value) +#vif_plugging_is_fatal=true + +# Number of seconds to wait for neutron vif plugging events to +# arrive before continuing or failing (see +# vif_plugging_is_fatal). If this is set to zero and +# vif_plugging_is_fatal is False, events should not be +# expected to arrive at all. (integer value) +#vif_plugging_timeout=300 + + +# +# Options defined in nova.virt.firewall +# + +# Firewall driver (defaults to hypervisor specific iptables +# driver) (string value) +firewall_driver=nova.virt.firewall.NoopFirewallDriver + +# Whether to allow network traffic from same network (boolean +# value) +#allow_same_net_traffic=true + + +# +# Options defined in nova.virt.imagecache +# + +# Number of seconds to wait between runs of the image cache +# manager (integer value) +#image_cache_manager_interval=2400 + +# Where cached images are stored under $instances_path. This +# is NOT the full path - just a folder name. For per-compute- +# host cached images, set to _base_$my_ip (string value) +# Deprecated group;name - DEFAULT;base_dir_name +#image_cache_subdirectory_name=_base + +# Should unused base images be removed? (boolean value) +#remove_unused_base_images=true + +# Unused unresized base images younger than this will not be +# removed (integer value) +#remove_unused_original_minimum_age_seconds=86400 + + +# +# Options defined in nova.virt.imagehandler +# + +# Specifies which image handler extension names to use for +# handling images. The first extension in the list which can +# handle the image with a suitable location will be used. 
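Note on the firewall settings above: this template enables the Noop firewall driver (firewall_driver=nova.virt.firewall.NoopFirewallDriver) alongside security_group_api=neutron, set earlier in the [DEFAULT] section. That is the usual pairing when Neutron enforces security groups, since nova-compute's own iptables firewall would otherwise program overlapping rules. Shown together for clarity, the two lines from this template are:

    security_group_api=neutron
    firewall_driver=nova.virt.firewall.NoopFirewallDriver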
+# (list value) +#image_handlers=download + + +# +# Options defined in nova.virt.images +# + +# Force backing images to raw format (boolean value) +#force_raw_images=true + + +# +# Options defined in nova.vnc +# + +# Location of VNC console proxy, in the form +# "http://127.0.0.1:6080/vnc_auto.html" (string value) +novncproxy_base_url=http://{{ vnc_host }}:6080/vnc_auto.html + +# Location of nova xvp VNC console proxy, in the form +# "http://127.0.0.1:6081/console" (string value) +xvpvncproxy_base_url=http://{{ vnc_host }}:6081/console + +# IP address on which instance vncservers should listen +# (string value) +vncserver_listen={{ vnc_host }} + +# The address to which proxy clients (like nova-xvpvncproxy) +# should connect (string value) +vncserver_proxyclient_address={{ vnc_host }} + +# Enable VNC related features (boolean value) +vnc_enabled=true + +# Keymap for VNC (string value) +vnc_keymap=ja + + +# +# Options defined in nova.vnc.xvp_proxy +# + +# Port that the XCP VNC proxy should bind to (integer value) +#xvpvncproxy_port=6081 + +# Address that the XCP VNC proxy should bind to (string value) +#xvpvncproxy_host=0.0.0.0 + + +# +# Options defined in nova.volume +# + +# The full class name of the volume API class to use (string +# value) +#volume_api_class=nova.volume.cinder.API + + +# +# Options defined in nova.volume.cinder +# + +# Info to match when looking for cinder in the service +# catalog. Format is: separated values of the form: +# :: (string value) +#cinder_catalog_info=volume:cinder:publicURL + +# Override service catalog lookup with template for cinder +# endpoint e.g. http://localhost:8776/v1/%(project_id)s +# (string value) +#cinder_endpoint_template= + +# Region name of this node (string value) +#os_region_name= + +# Location of ca certificates file to use for cinder client +# requests. (string value) +#cinder_ca_certificates_file= + +# Number of cinderclient retries on failed http calls (integer +# value) +#cinder_http_retries=3 + +# Allow to perform insecure SSL requests to cinder (boolean +# value) +#cinder_api_insecure=false + +# Allow attach between instance and volume in different +# availability zones. (boolean value) +#cinder_cross_az_attach=true + + +[baremetal] + +# +# Options defined in nova.virt.baremetal.db.api +# + +# The backend to use for bare-metal database (string value) +#db_backend=sqlalchemy + + +# +# Options defined in nova.virt.baremetal.db.sqlalchemy.session +# + +# The SQLAlchemy connection string used to connect to the +# bare-metal database (string value) +#sql_connection=sqlite:///$state_path/baremetal_nova.sqlite + + +# +# Options defined in nova.virt.baremetal.driver +# + +# Baremetal VIF driver. (string value) +#vif_driver=nova.virt.baremetal.vif_driver.BareMetalVIFDriver + +# Baremetal volume driver. (string value) +#volume_driver=nova.virt.baremetal.volume_driver.LibvirtVolumeDriver + +# A list of additional capabilities corresponding to +# flavor_extra_specs for this compute host to advertise. 
Valid +# entries are name=value, pairs For example, "key1:val1, +# key2:val2" (list value) +# Deprecated group;name - DEFAULT;instance_type_extra_specs +#flavor_extra_specs= + +# Baremetal driver back-end (pxe or tilera) (string value) +#driver=nova.virt.baremetal.pxe.PXE + +# Baremetal power management method (string value) +#power_manager=nova.virt.baremetal.ipmi.IPMI + +# Baremetal compute node's tftp root path (string value) +#tftp_root=/tftpboot + + +# +# Options defined in nova.virt.baremetal.ipmi +# + +# Path to baremetal terminal program (string value) +#terminal=shellinaboxd + +# Path to baremetal terminal SSL cert(PEM) (string value) +#terminal_cert_dir= + +# Path to directory stores pidfiles of baremetal_terminal +# (string value) +#terminal_pid_dir=$state_path/baremetal/console + +# Maximal number of retries for IPMI operations (integer +# value) +#ipmi_power_retry=10 + + +# +# Options defined in nova.virt.baremetal.pxe +# + +# Default kernel image ID used in deployment phase (string +# value) +#deploy_kernel= + +# Default ramdisk image ID used in deployment phase (string +# value) +#deploy_ramdisk= + +# Template file for injected network config (string value) +#net_config_template=$pybasedir/nova/virt/baremetal/net-dhcp.ubuntu.template + +# Additional append parameters for baremetal PXE boot (string +# value) +#pxe_append_params=nofb nomodeset vga=normal + +# Template file for PXE configuration (string value) +#pxe_config_template=$pybasedir/nova/virt/baremetal/pxe_config.template + +# If True, enable file injection for network info, files and +# admin password (boolean value) +#use_file_injection=false + +# Timeout for PXE deployments. Default: 0 (unlimited) (integer +# value) +#pxe_deploy_timeout=0 + +# If set, pass the network configuration details to the +# initramfs via cmdline. (boolean value) +#pxe_network_config=false + +# This gets passed to Neutron as the bootfile dhcp parameter. +# (string value) +#pxe_bootfile_name=pxelinux.0 + + +# +# Options defined in nova.virt.baremetal.tilera_pdu +# + +# IP address of tilera pdu (string value) +#tile_pdu_ip=10.0.100.1 + +# Management script for tilera pdu (string value) +#tile_pdu_mgr=/tftpboot/pdu_mgr + +# Power status of tilera PDU is OFF (integer value) +#tile_pdu_off=2 + +# Power status of tilera PDU is ON (integer value) +#tile_pdu_on=1 + +# Power status of tilera PDU (integer value) +#tile_pdu_status=9 + +# Wait time in seconds until check the result after tilera +# power operations (integer value) +#tile_power_wait=9 + + +# +# Options defined in nova.virt.baremetal.virtual_power_driver +# + +# IP or name to virtual power host (string value) +#virtual_power_ssh_host= + +# Port to use for ssh to virtual power host (integer value) +#virtual_power_ssh_port=22 + +# Base command to use for virtual power(vbox, virsh) (string +# value) +#virtual_power_type=virsh + +# User to execute virtual power commands as (string value) +#virtual_power_host_user= + +# Password for virtual power host_user (string value) +#virtual_power_host_pass= + +# The ssh key for virtual power host_user (string value) +#virtual_power_host_key= + + +# +# Options defined in nova.virt.baremetal.volume_driver +# + +# Do not set this out of dev/test environments. If a node does +# not have a fixed PXE IP address, volumes are exported with +# globally opened ACL (boolean value) +#use_unsafe_iscsi=false + +# The iSCSI IQN prefix used in baremetal volume connections. 
+# (string value) +#iscsi_iqn_prefix=iqn.2010-10.org.openstack.baremetal + + +[cells] + +# +# Options defined in nova.cells.manager +# + +# Cells communication driver to use (string value) +#driver=nova.cells.rpc_driver.CellsRPCDriver + +# Number of seconds after an instance was updated or deleted +# to continue to update cells (integer value) +#instance_updated_at_threshold=3600 + +# Number of instances to update per periodic task run (integer +# value) +#instance_update_num_instances=1 + + +# +# Options defined in nova.cells.messaging +# + +# Maximum number of hops for cells routing. (integer value) +#max_hop_count=10 + +# Cells scheduler to use (string value) +#scheduler=nova.cells.scheduler.CellsScheduler + + +# +# Options defined in nova.cells.opts +# + +# Enable cell functionality (boolean value) +#enable=false + +# The topic cells nodes listen on (string value) +#topic=cells + +# Manager for cells (string value) +#manager=nova.cells.manager.CellsManager + +# Name of this cell (string value) +#name=nova + +# Key/Multi-value list with the capabilities of the cell (list +# value) +#capabilities=hypervisor=xenserver;kvm,os=linux;windows + +# Seconds to wait for response from a call to a cell. (integer +# value) +#call_timeout=60 + +# Percentage of cell capacity to hold in reserve. Affects both +# memory and disk utilization (floating point value) +#reserve_percent=10.0 + +# Type of cell: api or compute (string value) +#cell_type=compute + +# Number of seconds after which a lack of capability and +# capacity updates signals the child cell is to be treated as +# a mute. (integer value) +#mute_child_interval=300 + +# Seconds between bandwidth updates for cells. (integer value) +#bandwidth_update_interval=600 + + +# +# Options defined in nova.cells.rpc_driver +# + +# Base queue name to use when communicating between cells. +# Various topics by message type will be appended to this. +# (string value) +#rpc_driver_queue_base=cells.intercell + + +# +# Options defined in nova.cells.scheduler +# + +# Filter classes the cells scheduler should use. An entry of +# "nova.cells.filters.all_filters" maps to all cells filters +# included with nova. (list value) +#scheduler_filter_classes=nova.cells.filters.all_filters + +# Weigher classes the cells scheduler should use. An entry of +# "nova.cells.weights.all_weighers" maps to all cell weighers +# included with nova. (list value) +#scheduler_weight_classes=nova.cells.weights.all_weighers + +# How many retries when no cells are available. (integer +# value) +#scheduler_retries=10 + +# How often to retry in seconds when no cells are available. +# (integer value) +#scheduler_retry_delay=2 + + +# +# Options defined in nova.cells.state +# + +# Interval, in seconds, for getting fresh cell information +# from the database. (integer value) +#db_check_interval=60 + +# Configuration file from which to read cells configuration. +# If given, overrides reading cells from the database. (string +# value) +#cells_config= + + +# +# Options defined in nova.cells.weights.mute_child +# + +# Multiplier used to weigh mute children. (The value should be +# negative.) (floating point value) +#mute_weight_multiplier=-10.0 + +# Weight value assigned to mute children. (The value should be +# positive.) (floating point value) +#mute_weight_value=1000.0 + + +# +# Options defined in nova.cells.weights.ram_by_instance_type +# + +# Multiplier used for weighing ram. Negative numbers mean to +# stack vs spread. 
(floating point value) +#ram_weight_multiplier=10.0 + + +# +# Options defined in nova.cells.weights.weight_offset +# + +# Multiplier used to weigh offset weigher. (floating point +# value) +#offset_weight_multiplier=1.0 + + +[conductor] + +# +# Options defined in nova.conductor.api +# + +# Perform nova-conductor operations locally (boolean value) +#use_local=false + +# The topic on which conductor nodes listen (string value) +#topic=conductor + +# Full class name for the Manager for conductor (string value) +#manager=nova.conductor.manager.ConductorManager + +# Number of workers for OpenStack Conductor service. The +# default will be the number of CPUs available. (integer +# value) +#workers= + + +[database] + +# +# Options defined in nova.db.sqlalchemy.api +# + +# The SQLAlchemy connection string used to connect to the +# slave database (string value) +#slave_connection= + + +# +# Options defined in nova.openstack.common.db.options +# + +# The file name to use with SQLite (string value) +#sqlite_db=nova.sqlite + +# If True, SQLite uses synchronous mode (boolean value) +#sqlite_synchronous=true + +# The backend to use for db (string value) +# Deprecated group;name - DEFAULT;db_backend +#backend=sqlalchemy + +# The SQLAlchemy connection string used to connect to the +# database (string value) +# Deprecated group;name - DEFAULT;sql_connection +# Deprecated group;name - [DATABASE]/sql_connection +# Deprecated group;name - [sql]/connection +connection=mysql://nova:{{ nova_db_password }}@{{ sql_host }}/nova + + + +# The SQL mode to be used for MySQL sessions (default is +# empty, meaning do not override any server-side SQL mode +# setting) (string value) +#mysql_sql_mode= + +# Timeout before idle sql connections are reaped (integer +# value) +# Deprecated group;name - DEFAULT;sql_idle_timeout +# Deprecated group;name - [DATABASE]/sql_idle_timeout +# Deprecated group;name - [sql]/idle_timeout +#idle_timeout=3600 + +# Minimum number of SQL connections to keep open in a pool +# (integer value) +# Deprecated group;name - DEFAULT;sql_min_pool_size +# Deprecated group;name - [DATABASE]/sql_min_pool_size +#min_pool_size=1 + +# Maximum number of SQL connections to keep open in a pool +# (integer value) +# Deprecated group;name - DEFAULT;sql_max_pool_size +# Deprecated group;name - [DATABASE]/sql_max_pool_size +#max_pool_size= + +# Maximum db connection retries during startup. (setting -1 +# implies an infinite retry count) (integer value) +# Deprecated group;name - DEFAULT;sql_max_retries +# Deprecated group;name - [DATABASE]/sql_max_retries +#max_retries=-1 + +# Interval between retries of opening a sql connection +# (integer value) +# Deprecated group;name - DEFAULT;sql_retry_interval +# Deprecated group;name - [DATABASE]/reconnect_interval +#retry_interval=10 + +# If set, use this value for max_overflow with sqlalchemy +# (integer value) +# Deprecated group;name - DEFAULT;sql_max_overflow +# Deprecated group;name - [DATABASE]/sqlalchemy_max_overflow +#max_overflow= + +# Verbosity of SQL debugging information. 
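For reference, the templated [database] connection option above becomes a standard SQLAlchemy URL once Ansible substitutes the playbook variables. A rendered example, using an illustrative database host and password rather than values from this repository, would look like:

    connection=mysql://nova:NOVA_DB_PASSWORD@192.0.2.10/nova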
0=None, +# 100=Everything (integer value) +# Deprecated group;name - DEFAULT;sql_connection_debug +#connection_debug=0 + +# Add python stack traces to SQL as comment strings (boolean +# value) +# Deprecated group;name - DEFAULT;sql_connection_trace +#connection_trace=false + +# If set, use this value for pool_timeout with sqlalchemy +# (integer value) +# Deprecated group;name - [DATABASE]/sqlalchemy_pool_timeout +#pool_timeout= + +# Enable the experimental use of database reconnect on +# connection lost (boolean value) +#use_db_reconnect=false + +# seconds between db connection retries (integer value) +#db_retry_interval=1 + +# Whether to increase interval between db connection retries, +# up to db_max_retry_interval (boolean value) +#db_inc_retry_interval=true + +# max seconds between db connection retries, if +# db_inc_retry_interval is enabled (integer value) +#db_max_retry_interval=10 + +# maximum db connection retries before error is raised. +# (setting -1 implies an infinite retry count) (integer value) +#db_max_retries=20 + + +[hyperv] + +# +# Options defined in nova.virt.hyperv.pathutils +# + +# The name of a Windows share name mapped to the +# "instances_path" dir and used by the resize feature to copy +# files to the target host. If left blank, an administrative +# share will be used, looking for the same "instances_path" +# used locally (string value) +#instances_path_share= + + +# +# Options defined in nova.virt.hyperv.utilsfactory +# + +# Force V1 WMI utility classes (boolean value) +#force_hyperv_utils_v1=false + +# Force V1 volume utility class (boolean value) +#force_volumeutils_v1=false + + +# +# Options defined in nova.virt.hyperv.vif +# + +# External virtual switch Name, if not provided, the first +# external virtual switch is used (string value) +#vswitch_name= + + +# +# Options defined in nova.virt.hyperv.vmops +# + +# Required for live migration among hosts with different CPU +# features (boolean value) +#limit_cpu_features=false + +# Sets the admin password in the config drive image (boolean +# value) +#config_drive_inject_password=false + +# Path of qemu-img command which is used to convert between +# different image types (string value) +#qemu_img_cmd=qemu-img.exe + +# Attaches the Config Drive image as a cdrom drive instead of +# a disk drive (boolean value) +#config_drive_cdrom=false + +# Enables metrics collections for an instance by using +# Hyper-V's metric APIs. Collected data can by retrieved by +# other apps and services, e.g.: Ceilometer. Requires Hyper-V +# / Windows Server 2012 and above (boolean value) +#enable_instance_metrics_collection=false + +# Enables dynamic memory allocation (ballooning) when set to a +# value greater than 1. The value expresses the ratio between +# the total RAM assigned to an instance and its startup RAM +# amount. For example a ratio of 2.0 for an instance with +# 1024MB of RAM implies 512MB of RAM allocated at startup +# (floating point value) +#dynamic_memory_ratio=1.0 + + +# +# Options defined in nova.virt.hyperv.volumeops +# + +# The number of times to retry to attach a volume (integer +# value) +#volume_attach_retry_count=10 + +# Interval between volume attachment attempts, in seconds +# (integer value) +#volume_attach_retry_interval=5 + +# The number of times to retry checking for a disk mounted via +# iSCSI. (integer value) +#mounted_disk_query_retry_count=10 + +# Interval between checks for a mounted iSCSI disk, in +# seconds. 
(integer value) +#mounted_disk_query_retry_interval=5 + + +[image_file_url] + +# +# Options defined in nova.image.download.file +# + +# List of file systems that are configured in this file in the +# image_file_url: sections (list value) +#filesystems= + + +[keymgr] + +# +# Options defined in nova.keymgr +# + +# The full class name of the key manager API class (string +# value) +#api_class=nova.keymgr.conf_key_mgr.ConfKeyManager + + +# +# Options defined in nova.keymgr.conf_key_mgr +# + +# Fixed key returned by key manager, specified in hex (string +# value) +#fixed_key= + + +[keystone_authtoken] +auth_host={{ frontend_int_ip }} +auth_port=35357 +auth_protocol=http +admin_tenant_name={{ service_tenant }} +admin_user=nova +admin_password={{ nova_identity_password }} +identity_uri=http://{{ frontend_int_ip }}:35357 +auth_uri=http://{{ frontend_ext_ip }}:5000 + +# +# Options defined in keystoneclient.middleware.auth_token +# + +# Prefix to prepend at the beginning of the path (string +# value) +#auth_admin_prefix= + +# Host providing the admin Identity API endpoint (string +# value) +#auth_host=127.0.0.1 + +# Port of the admin Identity API endpoint (integer value) +#auth_port=35357 + +# Protocol of the admin Identity API endpoint(http or https) +# (string value) +#auth_protocol=http + +# Complete public Identity API endpoint (string value) +#auth_uri= + +# API version of the admin Identity API endpoint (string +# value) +#auth_version=v2.0 + +# Do not handle authorization requests within the middleware, +# but delegate the authorization decision to downstream WSGI +# components (boolean value) +#delay_auth_decision=false + +# Request timeout value for communicating with Identity API +# server. (boolean value) +#http_connect_timeout= + +# How many times are we trying to reconnect when communicating +# with Identity API Server. (integer value) +#http_request_max_retries=3 + +# Allows to pass in the name of a fake http_handler callback +# function used instead of httplib.HTTPConnection or +# httplib.HTTPSConnection. Useful for unit testing where +# network is not available. (string value) +#http_handler= + +# Single shared secret with the Keystone configuration used +# for bootstrapping a Keystone installation, or otherwise +# bypassing the normal authentication process. (string value) +#admin_token= + +# Keystone account username (string value) +#admin_user=%SERVICE_USER% + +# Keystone account password (string value) +#admin_password=%SERVICE_PASSWORD% + +# Keystone service account tenant name to validate user tokens +# (string value) +#admin_tenant_name=%SERVICE_TENANT_NAME% + +# Env key for the swift cache (string value) +#cache= + +# Required if Keystone server requires client certificate +# (string value) +#certfile= + +# Required if Keystone server requires client certificate +# (string value) +#keyfile= + +# A PEM encoded Certificate Authority to use when verifying +# HTTPs connections. Defaults to system CAs. (string value) +#cafile= + +# Verify HTTPS connections. (boolean value) +#insecure=false + +# Directory used to cache files related to PKI tokens (string +# value) +#signing_dir= + +# If defined, the memcache server(s) to use for caching (list +# value) +# Deprecated group;name - DEFAULT;memcache_servers +#memcached_servers= + +# In order to prevent excessive requests and validations, the +# middleware uses an in-memory cache for the tokens the +# Keystone API returns. This is only valid if memcache_servers +# is defined. Set to -1 to disable caching completely. 
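For reference, the templated [keystone_authtoken] settings at the top of this section resolve to concrete endpoints after variable substitution. Using illustrative values (192.0.2.5 for the internal frontend address, 203.0.113.5 for the external one, and placeholder credentials; none of these come from this repository), the block would render roughly as:

    auth_host=192.0.2.5
    auth_port=35357
    auth_protocol=http
    admin_tenant_name=SERVICE_TENANT
    admin_user=nova
    admin_password=NOVA_IDENTITY_PASSWORD
    identity_uri=http://192.0.2.5:35357
    auth_uri=http://203.0.113.5:5000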
+# (integer value) +#token_cache_time=300 + +# Value only used for unit testing (integer value) +#revocation_cache_time=1 + +# (optional) if defined, indicate whether token data should be +# authenticated or authenticated and encrypted. Acceptable +# values are MAC or ENCRYPT. If MAC, token data is +# authenticated (with HMAC) in the cache. If ENCRYPT, token +# data is encrypted and authenticated in the cache. If the +# value is not one of these options or empty, auth_token will +# raise an exception on initialization. (string value) +#memcache_security_strategy= + +# (optional, mandatory if memcache_security_strategy is +# defined) this string is used for key derivation. (string +# value) +#memcache_secret_key= + +# (optional) indicate whether to set the X-Service-Catalog +# header. If False, middleware will not ask for service +# catalog on token validation and will not set the X-Service- +# Catalog header. (boolean value) +#include_service_catalog=true + +# Used to control the use and type of token binding. Can be +# set to: "disabled" to not check token binding. "permissive" +# (default) to validate binding information if the bind type +# is of a form known to the server and ignore it if not. +# "strict" like "permissive" but if the bind type is unknown +# the token will be rejected. "required" any form of token +# binding is needed to be allowed. Finally the name of a +# binding method that must be present in tokens. (string +# value) +#enforce_token_bind=permissive + + +[libvirt] + +# +# Options defined in nova.virt.libvirt.driver +# + +# Rescue ami image (string value) +#rescue_image_id= + +# Rescue aki image (string value) +#rescue_kernel_id= + +# Rescue ari image (string value) +#rescue_ramdisk_id= + +# Libvirt domain type (valid options are: kvm, lxc, qemu, uml, +# xen) (string value) +# Deprecated group;name - DEFAULT;libvirt_type +virt_type={{ virt_type }} + +# Override the default libvirt URI (which is dependent on +# virt_type) (string value) +# Deprecated group;name - DEFAULT;libvirt_uri +#connection_uri= + +# Inject the admin password at boot time, without an agent. +# (boolean value) +# Deprecated group;name - DEFAULT;libvirt_inject_password +#inject_password=false + +# Inject the ssh public key at boot time (boolean value) +# Deprecated group;name - DEFAULT;libvirt_inject_key +#inject_key=false + +# The partition to inject to : -2 => disable, -1 => inspect +# (libguestfs only), 0 => not partitioned, >0 => partition +# number (integer value) +# Deprecated group;name - DEFAULT;libvirt_inject_partition +#inject_partition=-2 + +# Sync virtual and real mouse cursors in Windows VMs (boolean +# value) +#use_usb_tablet=true + +# Migration target URI (any included "%s" is replaced with the +# migration target hostname) (string value) +#live_migration_uri=qemu+tcp://%s/system + +# Migration flags to be set for live migration (string value) +#live_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER + +# Migration flags to be set for block migration (string value) +#block_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_NON_SHARED_INC + +# Maximum bandwidth to be used during migration, in Mbps +# (integer value) +#live_migration_bandwidth=0 + +# Snapshot image format (valid options are : raw, qcow2, vmdk, +# vdi). Defaults to same as source image (string value) +#snapshot_image_format= + +# DEPRECATED. The libvirt VIF driver to configure the +# VIFs.This option is deprecated and will be removed in the +# Juno release. 
(string value) +# Deprecated group;name - DEFAULT;libvirt_vif_driver +#vif_driver=nova.virt.libvirt.vif.LibvirtGenericVIFDriver + +# Libvirt handlers for remote volumes. (list value) +# Deprecated group;name - DEFAULT;libvirt_volume_drivers +#volume_drivers=iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver,iser=nova.virt.libvirt.volume.LibvirtISERVolumeDriver,local=nova.virt.libvirt.volume.LibvirtVolumeDriver,fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver,rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver,sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver,nfs=nova.virt.libvirt.volume.LibvirtNFSVolumeDriver,aoe=nova.virt.libvirt.volume.LibvirtAOEVolumeDriver,glusterfs=nova.virt.libvirt.volume.LibvirtGlusterfsVolumeDriver,fibre_channel=nova.virt.libvirt.volume.LibvirtFibreChannelVolumeDriver,scality=nova.virt.libvirt.volume.LibvirtScalityVolumeDriver + +# Override the default disk prefix for the devices attached to +# a server, which is dependent on virt_type. (valid options +# are: sd, xvd, uvd, vd) (string value) +# Deprecated group;name - DEFAULT;libvirt_disk_prefix +#disk_prefix= + +# Number of seconds to wait for instance to shut down after +# soft reboot request is made. We fall back to hard reboot if +# instance does not shutdown within this window. (integer +# value) +# Deprecated group;name - DEFAULT;libvirt_wait_soft_reboot_seconds +#wait_soft_reboot_seconds=120 + +# Set to "host-model" to clone the host CPU feature flags; to +# "host-passthrough" to use the host CPU model exactly; to +# "custom" to use a named CPU model; to "none" to not set any +# CPU model. If virt_type="kvm|qemu", it will default to +# "host-model", otherwise it will default to "none" (string +# value) +# Deprecated group;name - DEFAULT;libvirt_cpu_mode +#cpu_mode= + +# Set to a named libvirt CPU model (see names listed in +# /usr/share/libvirt/cpu_map.xml). Only has effect if +# cpu_mode="custom" and virt_type="kvm|qemu" (string value) +# Deprecated group;name - DEFAULT;libvirt_cpu_model +#cpu_model= + +# Location where libvirt driver will store snapshots before +# uploading them to image service (string value) +# Deprecated group;name - DEFAULT;libvirt_snapshots_directory +#snapshots_directory=$instances_path/snapshots + +# Location where the Xen hvmloader is kept (string value) +#xen_hvmloader_path=/usr/lib/xen/boot/hvmloader + +# Specific cachemodes to use for different disk types e.g: +# file=directsync,block=none (list value) +#disk_cachemodes= + +# A path to a device that will be used as source of entropy on +# the host. Permitted options are: /dev/random or /dev/hwrng +# (string value) +#rng_dev_path= + + +# +# Options defined in nova.virt.libvirt.imagebackend +# + +# VM Images format. Acceptable values are: raw, qcow2, lvm, +# rbd, default. If default is specified, then use_cow_images +# flag is used instead of this one. (string value) +# Deprecated group;name - DEFAULT;libvirt_images_type +#images_type=default + +# LVM Volume Group that is used for VM images, when you +# specify images_type=lvm. (string value) +# Deprecated group;name - DEFAULT;libvirt_images_volume_group +#images_volume_group= + +# Create sparse logical volumes (with virtualsize) if this +# flag is set to True. (boolean value) +# Deprecated group;name - DEFAULT;libvirt_sparse_logical_volumes +#sparse_logical_volumes=false + +# Method used to wipe old volumes (valid options are: none, +# zero, shred) (string value) +#volume_clear=zero + +# Size in MiB to wipe at start of old volumes. 
0 => all +# (integer value) +#volume_clear_size=0 + +# The RADOS pool in which rbd volumes are stored (string +# value) +# Deprecated group;name - DEFAULT;libvirt_images_rbd_pool +#images_rbd_pool=rbd + +# Path to the ceph configuration file to use (string value) +# Deprecated group;name - DEFAULT;libvirt_images_rbd_ceph_conf +#images_rbd_ceph_conf= + + +# +# Options defined in nova.virt.libvirt.imagecache +# + +# Allows image information files to be stored in non-standard +# locations (string value) +#image_info_filename_pattern=$instances_path/$image_cache_subdirectory_name/%(image)s.info + +# Should unused kernel images be removed? This is only safe to +# enable if all compute nodes have been updated to support +# this option. This will be enabled by default in future. +# (boolean value) +#remove_unused_kernels=false + +# Unused resized base images younger than this will not be +# removed (integer value) +#remove_unused_resized_minimum_age_seconds=3600 + +# Write a checksum for files in _base to disk (boolean value) +#checksum_base_images=false + +# How frequently to checksum base images (integer value) +#checksum_interval_seconds=3600 + + +# +# Options defined in nova.virt.libvirt.utils +# + +# Compress snapshot images when possible. This currently +# applies exclusively to qcow2 images (boolean value) +# Deprecated group;name - DEFAULT;libvirt_snapshot_compression +#snapshot_compression=false + + +# +# Options defined in nova.virt.libvirt.vif +# + +# Use virtio for bridge interfaces with KVM/QEMU (boolean +# value) +# Deprecated group;name - DEFAULT;libvirt_use_virtio_for_bridges +#use_virtio_for_bridges=true + + +# +# Options defined in nova.virt.libvirt.volume +# + +# Number of times to rescan iSCSI target to find volume +# (integer value) +#num_iscsi_scan_tries=5 + +# Number of times to rescan iSER target to find volume +# (integer value) +#num_iser_scan_tries=5 + +# The RADOS client name for accessing rbd volumes (string +# value) +#rbd_user= + +# The libvirt UUID of the secret for the rbd_user volumes +# (string value) +#rbd_secret_uuid= + +# Directory where the NFS volume is mounted on the compute +# node (string value) +#nfs_mount_point_base=$state_path/mnt + +# Mount options passed to the NFS client. See the nfs man +# page for details (string value) +#nfs_mount_options= + +# Number of times to rediscover AoE target to find volume +# (integer value) +#num_aoe_discover_tries=3 + +# Directory where the glusterfs volume is mounted on the +# compute node (string value) +#glusterfs_mount_point_base=$state_path/mnt + +# Use multipath connection of the iSCSI volume (boolean value) +# Deprecated group;name - DEFAULT;libvirt_iscsi_use_multipath +#iscsi_use_multipath=false + +# Use multipath connection of the iSER volume (boolean value) +# Deprecated group;name - DEFAULT;libvirt_iser_use_multipath +#iser_use_multipath=false + +# Path or URL to Scality SOFS configuration file (string +# value) +#scality_sofs_config= + +# Base dir where Scality SOFS shall be mounted (string value) +#scality_sofs_mount_point=$state_path/scality + +# Protocols listed here will be accessed directly from QEMU. +# Currently supported protocols: [gluster] (list value) +#qemu_allowed_storage_drivers= + + +[matchmaker_ring] + +# +# Options defined in oslo.messaging +# + +# Matchmaker ring file (JSON). 
(string value) +# Deprecated group;name - DEFAULT;matchmaker_ringfile +#ringfile=/etc/oslo/matchmaker_ring.json + + +[metrics] + +# +# Options defined in nova.scheduler.weights.metrics +# + +# Multiplier used for weighing metrics. (floating point value) +#weight_multiplier=1.0 + +# How the metrics are going to be weighed. This should be in +# the form of "=, =, ...", where +# is one of the metrics to be weighed, and is +# the corresponding ratio. So for "name1=1.0, name2=-1.0" The +# final weight would be name1.value * 1.0 + name2.value * +# -1.0. (list value) +#weight_setting= + +# How to treat the unavailable metrics. When a metric is NOT +# available for a host, if it is set to be True, it would +# raise an exception, so it is recommended to use the +# scheduler filter MetricFilter to filter out those hosts. If +# it is set to be False, the unavailable metric would be +# treated as a negative factor in weighing process, the +# returned value would be set by the option +# weight_of_unavailable. (boolean value) +#required=true + +# The final weight value to be returned if required is set to +# False and any one of the metrics set by weight_setting is +# unavailable. (floating point value) +#weight_of_unavailable=-10000.0 + + +[osapi_v3] + +# +# Options defined in nova.api.openstack +# + +# Whether the V3 API is enabled or not (boolean value) +#enabled=false + +# A list of v3 API extensions to never load. Specify the +# extension aliases here. (list value) +#extensions_blacklist= + +# If the list is not empty then a v3 API extension will only +# be loaded if it exists in this list. Specify the extension +# aliases here. (list value) +#extensions_whitelist= + + +[rdp] + +# +# Options defined in nova.rdp +# + +# Location of RDP html5 console proxy, in the form +# "http://127.0.0.1:6083/" (string value) +#html5_proxy_base_url=http://127.0.0.1:6083/ + +# Enable RDP related features (boolean value) +#enabled=false + + +[spice] + +# +# Options defined in nova.spice +# + +# Location of spice HTML5 console proxy, in the form +# "http://127.0.0.1:6082/spice_auto.html" (string value) +#html5proxy_base_url=http://127.0.0.1:6082/spice_auto.html + +# IP address on which instance spice server should listen +# (string value) +#server_listen=127.0.0.1 + +# The address to which proxy clients (like nova- +# spicehtml5proxy) should connect (string value) +#server_proxyclient_address=127.0.0.1 + +# Enable spice related features (boolean value) +#enabled=false + +# Enable spice guest agent support (boolean value) +#agent_enabled=true + +# Keymap for spice (string value) +#keymap=en-us + + +[ssl] + +# +# Options defined in nova.openstack.common.sslutils +# + +# CA certificate file to use to verify connecting clients. +# (string value) +#ca_file= + +# Certificate file to use when starting the server securely. +# (string value) +#cert_file= + +# Private key file to use when starting the server securely. 
+# (string value) +#key_file= + + +[trusted_computing] + +# +# Options defined in nova.scheduler.filters.trusted_filter +# + +# Attestation server HTTP (string value) +#attestation_server= + +# Attestation server Cert file for Identity verification +# (string value) +#attestation_server_ca_file= + +# Attestation server port (string value) +#attestation_port=8443 + +# Attestation web API URL (string value) +#attestation_api_url=/OpenAttestationWebServices/V1.0 + +# Attestation authorization blob - must change (string value) +#attestation_auth_blob= + +# Attestation status cache valid period length (integer value) +#attestation_auth_timeout=60 + + +[upgrade_levels] + +# +# Options defined in nova.baserpc +# + +# Set a version cap for messages sent to the base api in any +# service (string value) +#baseapi= + + +# +# Options defined in nova.cells.rpc_driver +# + +# Set a version cap for messages sent between cells services +# (string value) +#intercell= + + +# +# Options defined in nova.cells.rpcapi +# + +# Set a version cap for messages sent to local cells services +# (string value) +#cells= + + +# +# Options defined in nova.cert.rpcapi +# + +# Set a version cap for messages sent to cert services (string +# value) +#cert= + + +# +# Options defined in nova.compute.rpcapi +# + +# Set a version cap for messages sent to compute services. If +# you plan to do a live upgrade from havana to icehouse, you +# should set this option to "icehouse-compat" before beginning +# the live upgrade procedure. (string value) +#compute= + + +# +# Options defined in nova.conductor.rpcapi +# + +# Set a version cap for messages sent to conductor services +# (string value) +#conductor= + + +# +# Options defined in nova.console.rpcapi +# + +# Set a version cap for messages sent to console services +# (string value) +#console= + + +# +# Options defined in nova.consoleauth.rpcapi +# + +# Set a version cap for messages sent to consoleauth services +# (string value) +#consoleauth= + + +# +# Options defined in nova.network.rpcapi +# + +# Set a version cap for messages sent to network services +# (string value) +#network= + + +# +# Options defined in nova.scheduler.rpcapi +# + +# Set a version cap for messages sent to scheduler services +# (string value) +#scheduler= + + +[vmware] + +# +# Options defined in nova.virt.vmwareapi.driver +# + +# Hostname or IP address for connection to VMware ESX/VC host. +# (string value) +#host_ip= + +# Username for connection to VMware ESX/VC host. (string +# value) +#host_username= + +# Password for connection to VMware ESX/VC host. (string +# value) +#host_password= + +# Name of a VMware Cluster ComputeResource. Used only if +# compute_driver is vmwareapi.VMwareVCDriver. (multi valued) +#cluster_name= + +# Regex to match the name of a datastore. (string value) +#datastore_regex= + +# The interval used for polling of remote tasks. (floating +# point value) +#task_poll_interval=0.5 + +# The number of times we retry on failures, e.g., socket +# error, etc. (integer value) +#api_retry_count=10 + +# VNC starting port (integer value) +#vnc_port=5900 + +# Total number of VNC ports (integer value) +#vnc_port_total=10000 + +# Whether to use linked clone (boolean value) +#use_linked_clone=true + + +# +# Options defined in nova.virt.vmwareapi.vif +# + +# Physical ethernet adapter name for vlan networking (string +# value) +#vlan_interface=vmnic0 + + +# +# Options defined in nova.virt.vmwareapi.vim +# + +# Optional VIM Service WSDL Location e.g +# http:///vimService.wsdl. 
Optional over-ride to +# default location for bug work-arounds (string value) +#wsdl_location= + + +# +# Options defined in nova.virt.vmwareapi.vim_util +# + +# The maximum number of ObjectContent data objects that should +# be returned in a single result. A positive value will cause +# the operation to suspend the retrieval when the count of +# objects reaches the specified maximum. The server may still +# limit the count to something less than the configured value. +# Any remaining objects may be retrieved with additional +# requests. (integer value) +#maximum_objects=100 + + +# +# Options defined in nova.virt.vmwareapi.vmops +# + +# Name of Integration Bridge (string value) +#integration_bridge=br-int + + +[xenserver] + +# +# Options defined in nova.virt.xenapi.agent +# + +# Number of seconds to wait for agent reply (integer value) +# Deprecated group;name - DEFAULT;agent_timeout +#agent_timeout=30 + +# Number of seconds to wait for agent to be fully operational +# (integer value) +# Deprecated group;name - DEFAULT;agent_version_timeout +#agent_version_timeout=300 + +# Number of seconds to wait for agent reply to resetnetwork +# request (integer value) +# Deprecated group;name - DEFAULT;agent_resetnetwork_timeout +#agent_resetnetwork_timeout=60 + +# Specifies the path in which the XenAPI guest agent should be +# located. If the agent is present, network configuration is +# not injected into the image. Used if +# compute_driver=xenapi.XenAPIDriver and flat_injected=True +# (string value) +# Deprecated group;name - DEFAULT;xenapi_agent_path +#agent_path=usr/sbin/xe-update-networking + +# Disables the use of the XenAPI agent in any image regardless +# of what image properties are present. (boolean value) +# Deprecated group;name - DEFAULT;xenapi_disable_agent +#disable_agent=false + +# Determines if the XenAPI agent should be used when the image +# used does not contain a hint to declare if the agent is +# present or not. The hint is a glance property +# "xenapi_use_agent" that has the value "True" or "False". +# Note that waiting for the agent when it is not present will +# significantly increase server boot times. (boolean value) +# Deprecated group;name - DEFAULT;xenapi_use_agent_default +#use_agent_default=false + + +# +# Options defined in nova.virt.xenapi.client.session +# + +# Timeout in seconds for XenAPI login. (integer value) +# Deprecated group;name - DEFAULT;xenapi_login_timeout +#login_timeout=10 + +# Maximum number of concurrent XenAPI connections. Used only +# if compute_driver=xenapi.XenAPIDriver (integer value) +# Deprecated group;name - DEFAULT;xenapi_connection_concurrent +#connection_concurrent=5 + + +# +# Options defined in nova.virt.xenapi.driver +# + +# URL for connection to XenServer/Xen Cloud Platform. A +# special value of unix://local can be used to connect to the +# local unix socket. Required if +# compute_driver=xenapi.XenAPIDriver (string value) +# Deprecated group;name - DEFAULT;xenapi_connection_url +#connection_url= + +# Username for connection to XenServer/Xen Cloud Platform. +# Used only if compute_driver=xenapi.XenAPIDriver (string +# value) +# Deprecated group;name - DEFAULT;xenapi_connection_username +#connection_username=root + +# Password for connection to XenServer/Xen Cloud Platform. +# Used only if compute_driver=xenapi.XenAPIDriver (string +# value) +# Deprecated group;name - DEFAULT;xenapi_connection_password +#connection_password= + +# The interval used for polling of coalescing vhds. 
Used only +# if compute_driver=xenapi.XenAPIDriver (floating point value) +# Deprecated group;name - DEFAULT;xenapi_vhd_coalesce_poll_interval +#vhd_coalesce_poll_interval=5.0 + +# Ensure compute service is running on host XenAPI connects +# to. (boolean value) +# Deprecated group;name - DEFAULT;xenapi_check_host +#check_host=true + +# Max number of times to poll for VHD to coalesce. Used only +# if compute_driver=xenapi.XenAPIDriver (integer value) +# Deprecated group;name - DEFAULT;xenapi_vhd_coalesce_max_attempts +#vhd_coalesce_max_attempts=20 + +# Base path to the storage repository (string value) +# Deprecated group;name - DEFAULT;xenapi_sr_base_path +#sr_base_path=/var/run/sr-mount + +# The iSCSI Target Host (string value) +# Deprecated group;name - DEFAULT;target_host +#target_host= + +# The iSCSI Target Port, default is port 3260 (string value) +# Deprecated group;name - DEFAULT;target_port +#target_port=3260 + +# IQN Prefix (string value) +# Deprecated group;name - DEFAULT;iqn_prefix +#iqn_prefix=iqn.2010-10.org.openstack + +# Used to enable the remapping of VBD dev (Works around an +# issue in Ubuntu Maverick) (boolean value) +# Deprecated group;name - DEFAULT;xenapi_remap_vbd_dev +#remap_vbd_dev=false + +# Specify prefix to remap VBD dev to (ex. /dev/xvdb -> +# /dev/sdb) (string value) +# Deprecated group;name - DEFAULT;xenapi_remap_vbd_dev_prefix +#remap_vbd_dev_prefix=sd + + +# +# Options defined in nova.virt.xenapi.image.bittorrent +# + +# Base URL for torrent files. (string value) +# Deprecated group;name - DEFAULT;xenapi_torrent_base_url +#torrent_base_url= + +# Probability that peer will become a seeder. (1.0 = 100%) +# (floating point value) +# Deprecated group;name - DEFAULT;xenapi_torrent_seed_chance +#torrent_seed_chance=1.0 + +# Number of seconds after downloading an image via BitTorrent +# that it should be seeded for other peers. (integer value) +# Deprecated group;name - DEFAULT;xenapi_torrent_seed_duration +#torrent_seed_duration=3600 + +# Cached torrent files not accessed within this number of +# seconds can be reaped (integer value) +# Deprecated group;name - DEFAULT;xenapi_torrent_max_last_accessed +#torrent_max_last_accessed=86400 + +# Beginning of port range to listen on (integer value) +# Deprecated group;name - DEFAULT;xenapi_torrent_listen_port_start +#torrent_listen_port_start=6881 + +# End of port range to listen on (integer value) +# Deprecated group;name - DEFAULT;xenapi_torrent_listen_port_end +#torrent_listen_port_end=6891 + +# Number of seconds a download can remain at the same progress +# percentage w/o being considered a stall (integer value) +# Deprecated group;name - DEFAULT;xenapi_torrent_download_stall_cutoff +#torrent_download_stall_cutoff=600 + +# Maximum number of seeder processes to run concurrently +# within a given dom0. (-1 = no limit) (integer value) +# Deprecated group;name - DEFAULT;xenapi_torrent_max_seeder_processes_per_host +#torrent_max_seeder_processes_per_host=1 + + +# +# Options defined in nova.virt.xenapi.pool +# + +# To use for hosts with different CPUs (boolean value) +# Deprecated group;name - DEFAULT;use_join_force +#use_join_force=true + + +# +# Options defined in nova.virt.xenapi.vif +# + +# Name of Integration Bridge used by Open vSwitch (string +# value) +# Deprecated group;name - DEFAULT;xenapi_ovs_integration_bridge +#ovs_integration_bridge=xapi1 + + +# +# Options defined in nova.virt.xenapi.vm_utils +# + +# Cache glance images locally. 
`all` will cache all images, +# `some` will only cache images that have the image_property +# `cache_in_nova=True`, and `none` turns off caching entirely +# (string value) +# Deprecated group;name - DEFAULT;cache_images +#cache_images=all + +# Compression level for images, e.g., 9 for gzip -9. Range is +# 1-9, 9 being most compressed but most CPU intensive on dom0. +# (integer value) +# Deprecated group;name - DEFAULT;xenapi_image_compression_level +#image_compression_level= + +# Default OS type (string value) +# Deprecated group;name - DEFAULT;default_os_type +#default_os_type=linux + +# Time to wait for a block device to be created (integer +# value) +# Deprecated group;name - DEFAULT;block_device_creation_timeout +#block_device_creation_timeout=10 + +# Maximum size in bytes of kernel or ramdisk images (integer +# value) +# Deprecated group;name - DEFAULT;max_kernel_ramdisk_size +#max_kernel_ramdisk_size=16777216 + +# Filter for finding the SR to be used to install guest +# instances on. To use the Local Storage in default +# XenServer/XCP installations set this flag to other-config +# :i18n-key=local-storage. To select an SR with a different +# matching criteria, you could set it to other- +# config:my_favorite_sr=true. On the other hand, to fall back +# on the Default SR, as displayed by XenCenter, set this flag +# to: default-sr:true (string value) +# Deprecated group;name - DEFAULT;sr_matching_filter +#sr_matching_filter=default-sr:true + +# Whether to use sparse_copy for copying data on a resize down +# (False will use standard dd). This speeds up resizes down +# considerably since large runs of zeros won't have to be +# rsynced (boolean value) +# Deprecated group;name - DEFAULT;xenapi_sparse_copy +#sparse_copy=true + +# Maximum number of retries to unplug VBD (integer value) +# Deprecated group;name - DEFAULT;xenapi_num_vbd_unplug_retries +#num_vbd_unplug_retries=10 + +# Whether or not to download images via Bit Torrent +# (all|some|none). (string value) +# Deprecated group;name - DEFAULT;xenapi_torrent_images +#torrent_images=none + +# Name of network to use for booting iPXE ISOs (string value) +# Deprecated group;name - DEFAULT;xenapi_ipxe_network_name +#ipxe_network_name= + +# URL to the iPXE boot menu (string value) +# Deprecated group;name - DEFAULT;xenapi_ipxe_boot_menu_url +#ipxe_boot_menu_url= + +# Name and optionally path of the tool used for ISO image +# creation (string value) +# Deprecated group;name - DEFAULT;xenapi_ipxe_mkisofs_cmd +#ipxe_mkisofs_cmd=mkisofs + + +# +# Options defined in nova.virt.xenapi.vmops +# + +# Number of seconds to wait for instance to go to running +# state (integer value) +# Deprecated group;name - DEFAULT;xenapi_running_timeout +#running_timeout=60 + +# The XenAPI VIF driver using XenServer Network APIs. (string +# value) +# Deprecated group;name - DEFAULT;xenapi_vif_driver +#vif_driver=nova.virt.xenapi.vif.XenAPIBridgeDriver + +# Dom0 plugin driver used to handle image uploads. 
(string +# value) +# Deprecated group;name - DEFAULT;xenapi_image_upload_handler +#image_upload_handler=nova.virt.xenapi.image.glance.GlanceStore + + +# +# Options defined in nova.virt.xenapi.volume_utils +# + +# Number of seconds to wait for an SR to settle if the VDI +# does not exist when first introduced (integer value) +#introduce_vdi_retry_wait=20 + + +[zookeeper] + +# +# Options defined in nova.servicegroup.drivers.zk +# + +# The ZooKeeper addresses for servicegroup service in the +# format of host1:port,host2:port,host3:port (string value) +#address= + +# The recv_timeout parameter for the zk session (integer +# value) +#recv_timeout=4000 + +# The prefix used in ZooKeeper to store ephemeral nodes +# (string value) +#sg_prefix=/servicegroups + +# Number of seconds to wait until retrying to join the session +# (integer value) +#sg_retry_interval=5 + + diff --git a/tools/ansible-openstack/templates/etc/ntp.conf b/tools/ansible-openstack/templates/etc/ntp.conf new file mode 100644 index 0000000..f1a53fd --- /dev/null +++ b/tools/ansible-openstack/templates/etc/ntp.conf @@ -0,0 +1,8 @@ +driftfile /var/lib/ntp/drift +restrict default kod nomodify notrap nopeer noquery +restrict -6 default kod nomodify notrap nopeer noquery +restrict 127.0.0.1 +restrict -6 ::1 +server {{ ntp_server }} +includefile /etc/ntp/crypto/pw +keys /etc/ntp/keys diff --git a/tools/ansible-openstack/templates/etc/openstack-dashboard/local_settings b/tools/ansible-openstack/templates/etc/openstack-dashboard/local_settings new file mode 100644 index 0000000..1bedfef --- /dev/null +++ b/tools/ansible-openstack/templates/etc/openstack-dashboard/local_settings @@ -0,0 +1,490 @@ +import os + +from django.utils.translation import ugettext_lazy as _ + +from openstack_dashboard import exceptions + +DEBUG = False +TEMPLATE_DEBUG = DEBUG + +# Required for Django 1.5. +# If horizon is running in production (DEBUG is False), set this +# with the list of host/domain names that the application can serve. +# For more information see: +# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts +#ALLOWED_HOSTS = ['horizon.example.com', 'localhost'] + +# Set SSL proxy settings: +# For Django 1.4+ pass this header from the proxy after terminating the SSL, +# and don't forget to strip it from the client's request. +# For more information see: +# https://docs.djangoproject.com/en/1.4/ref/settings/#secure-proxy-ssl-header +# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https') + +# If Horizon is being served through SSL, then uncomment the following two +# settings to better secure the cookies from security exploits +#CSRF_COOKIE_SECURE = True +#SESSION_COOKIE_SECURE = True + +# Overrides for OpenStack API versions. Use this setting to force the +# OpenStack dashboard to use a specific API version for a given service API. +# NOTE: The version should be formatted as it appears in the URL for the +# service API. For example, The identity service APIs have inconsistent +# use of the decimal point, so valid options would be "2.0" or "3". +# OPENSTACK_API_VERSIONS = { +# "identity": 3, +# "volume": 2 +# } + +# Set this to True if running on multi-domain model. When this is enabled, it +# will require user to enter the Domain name in addition to username for login. +# OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = False + +# Overrides the default domain used when running on single-domain model +# with Keystone V3. All entities will be created in the default domain. 
+# OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default' + +# Set Console type: +# valid options would be "AUTO", "VNC", "SPICE" or "RDP" +# CONSOLE_TYPE = "AUTO" + +# Default OpenStack Dashboard configuration. +HORIZON_CONFIG = { + 'dashboards': ('project', 'admin', 'settings',), + 'default_dashboard': 'project', + 'user_home': 'openstack_dashboard.views.get_user_home', + 'ajax_queue_limit': 10, + 'auto_fade_alerts': { + 'delay': 3000, + 'fade_duration': 1500, + 'types': ['alert-success', 'alert-info'] + }, + 'help_url': "http://docs.openstack.org", + 'exceptions': {'recoverable': exceptions.RECOVERABLE, + 'not_found': exceptions.NOT_FOUND, + 'unauthorized': exceptions.UNAUTHORIZED}, +} + +# Specify a regular expression to validate user passwords. +# HORIZON_CONFIG["password_validator"] = { +# "regex": '.*', +# "help_text": _("Your password does not meet the requirements.") +# } + +# Disable simplified floating IP address management for deployments with +# multiple floating IP pools or complex network requirements. +# HORIZON_CONFIG["simple_ip_management"] = False + +# Turn off browser autocompletion for the login form if so desired. +# HORIZON_CONFIG["password_autocomplete"] = "off" + + +# Set custom secret key: +# You can either set it to a specific value or you can let horizon generate a +# default secret key that is unique on this machine, i.e. regardless of the +# amount of Python WSGI workers (if used behind Apache+mod_wsgi). However, there +# may be situations where you would want to set this explicitly, e.g. when +# multiple dashboard instances are distributed on different machines (usually +# behind a load-balancer). Either you have to make sure that a session gets all +# requests routed to the same dashboard instance or you set the same SECRET_KEY +# for all of them. +from horizon.utils import secret_key +LOCAL_PATH = '/tmp' +SECRET_KEY = secret_key.generate_or_read_from_file(os.path.join(LOCAL_PATH, '.secret_key_store')) + +# We recommend you use memcached for development; otherwise after every reload +# of the django development server, you will have to login again. To use +# memcached set CACHES to something like +# CACHES = { +# 'default': { +# 'BACKEND' : 'django.core.cache.backends.memcached.MemcachedCache', +# 'LOCATION' : '127.0.0.1:11211', +# } +#} + +CACHES = { + 'default': { + 'BACKEND' : 'django.core.cache.backends.locmem.LocMemCache' + } +} + +# Send email to the console by default +EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' +# Or send them to /dev/null +#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend' + +# Configure these for your outgoing email host +# EMAIL_HOST = 'smtp.my-company.com' +# EMAIL_PORT = 25 +# EMAIL_HOST_USER = 'djangomail' +# EMAIL_HOST_PASSWORD = 'top-secret!' + +# For multiple regions uncomment this configuration, and add (endpoint, title). +# AVAILABLE_REGIONS = [ +# ('http://cluster1.example.com:5000/v2.0', 'cluster1'), +# ('http://cluster2.example.com:5000/v2.0', 'cluster2'), +# ] + +OPENSTACK_HOST = "{{ frontend_int_ip }}" +OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST +OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_" + +# Disable SSL certificate checks (useful for self-signed certificates): +# OPENSTACK_SSL_NO_VERIFY = True + +# The CA certificate to use to verify SSL connections +# OPENSTACK_SSL_CACERT = '/path/to/cacert.pem' + +# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the +# capabilities of the auth backend for Keystone.
+# If Keystone has been configured to use LDAP as the auth backend then set +# can_edit_user to False and name to 'ldap'. +# +# TODO(tres): Remove these once Keystone has an API to identify auth backend. +OPENSTACK_KEYSTONE_BACKEND = { + 'name': 'native', + 'can_edit_user': True, + 'can_edit_group': True, + 'can_edit_project': True, + 'can_edit_domain': True, + 'can_edit_role': True +} + +#Setting this to True, will add a new "Retrieve Password" action on instance, +#allowing Admin session password retrieval/decryption. +#OPENSTACK_ENABLE_PASSWORD_RETRIEVE = False + +# The Xen Hypervisor has the ability to set the mount point for volumes +# attached to instances (other Hypervisors currently do not). Setting +# can_set_mount_point to True will add the option to set the mount point +# from the UI. +OPENSTACK_HYPERVISOR_FEATURES = { + 'can_set_mount_point': False, + 'can_set_password': False, +} + +# The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional +# services provided by neutron. Options currently available are load +# balancer service, security groups, quotas, VPN service. +OPENSTACK_NEUTRON_NETWORK = { + 'enable_lb': True, + 'enable_firewall': False, + 'enable_quotas': True, + 'enable_vpn': False, + # The profile_support option is used to detect if an external router can be + # configured via the dashboard. When using specific plugins the + # profile_support can be turned on if needed. + 'profile_support': None, + #'profile_support': 'cisco', +} + +# The OPENSTACK_IMAGE_BACKEND settings can be used to customize features +# in the OpenStack Dashboard related to the Image service, such as the list +# of supported image formats. +# OPENSTACK_IMAGE_BACKEND = { +# 'image_formats': [ +# ('', ''), +# ('aki', _('AKI - Amazon Kernel Image')), +# ('ami', _('AMI - Amazon Machine Image')), +# ('ari', _('ARI - Amazon Ramdisk Image')), +# ('iso', _('ISO - Optical Disk Image')), +# ('qcow2', _('QCOW2 - QEMU Emulator')), +# ('raw', _('Raw')), +# ('vdi', _('VDI')), +# ('vhd', _('VHD')), +# ('vmdk', _('VMDK')) +# ] +# } + +# The IMAGE_CUSTOM_PROPERTY_TITLES settings is used to customize the titles for +# image custom property attributes that appear on image detail pages. +IMAGE_CUSTOM_PROPERTY_TITLES = { + "architecture": _("Architecture"), + "kernel_id": _("Kernel ID"), + "ramdisk_id": _("Ramdisk ID"), + "image_state": _("Euca2ools state"), + "project_id": _("Project ID"), + "image_type": _("Image Type") +} + +# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints +# in the Keystone service catalog. Use this setting when Horizon is running +# external to the OpenStack environment. The default is 'publicURL'. +#OPENSTACK_ENDPOINT_TYPE = "publicURL" + +# SECONDARY_ENDPOINT_TYPE specifies the fallback endpoint type to use in the +# case that OPENSTACK_ENDPOINT_TYPE is not present in the endpoints +# in the Keystone service catalog. Use this setting when Horizon is running +# external to the OpenStack environment. The default is None. This +# value should differ from OPENSTACK_ENDPOINT_TYPE if used. +#SECONDARY_ENDPOINT_TYPE = "publicURL" + +# The number of objects (Swift containers/objects or images) to display +# on a single page before providing a paging element (a "more" link) +# to paginate results. +API_RESULT_LIMIT = 1000 +API_RESULT_PAGE_SIZE = 20 + +# The timezone of the server. This should correspond with the timezone +# of your entire OpenStack installation, and hopefully be in UTC. 
+TIME_ZONE = "UTC" + +# When launching an instance, the menu of available flavors is +# sorted by RAM usage, ascending. If you would like a different sort order, +# you can provide another flavor attribute as sorting key. Alternatively, you +# can provide a custom callback method to use for sorting. You can also provide +# a flag for reverse sort. For more info, see +# http://docs.python.org/2/library/functions.html#sorted +# CREATE_INSTANCE_FLAVOR_SORT = { +# 'key': 'name', +# # or +# 'key': my_awesome_callback_method, +# 'reverse': False, +# } + +# The Horizon Policy Enforcement engine uses these values to load per service +# policy rule files. The content of these files should match the files the +# OpenStack services are using to determine role based access control in the +# target installation. + +# Path to directory containing policy.json files +# POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf") +POLICY_FILES_PATH = '/etc/openstack-dashboard' +# Map of local copy of service policy files +#POLICY_FILES = { +# 'identity': 'keystone_policy.json', +# 'compute': 'nova_policy.json', +# 'volume': 'cinder_policy.json', +# 'image': 'glance_policy.json', +#} + +# Trove user and database extension support. By default support for +# creating users and databases on database instances is turned on. +# To disable these extensions set the permission here to something +# unusable such as ["!"]. +# TROVE_ADD_USER_PERMS = [] +# TROVE_ADD_DATABASE_PERMS = [] + +LOGGING = { + 'version': 1, + # When set to True this will disable all logging except + # for loggers specified in this configuration dictionary. Note that + # if nothing is specified here and disable_existing_loggers is True, + # django.db.backends will still log unless it is disabled explicitly. + 'disable_existing_loggers': False, + 'handlers': { + 'null': { + 'level': 'DEBUG', + 'class': 'django.utils.log.NullHandler', + }, + 'console': { + # Set the level to "DEBUG" for verbose output logging. + 'level': 'INFO', + 'class': 'logging.StreamHandler', + }, + }, + 'loggers': { + # Logging from django.db.backends is VERY verbose, send to null + # by default. 
+ 'django.db.backends': { + 'handlers': ['null'], + 'propagate': False, + }, + 'requests': { + 'handlers': ['null'], + 'propagate': False, + }, + 'horizon': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'openstack_dashboard': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'novaclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'cinderclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'keystoneclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'glanceclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'neutronclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'heatclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'ceilometerclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'troveclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'swiftclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'openstack_auth': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'nose.plugins.manager': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'django': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'iso8601': { + 'handlers': ['null'], + 'propagate': False, + }, + } +} + +# 'direction' should not be specified for all_tcp/udp/icmp. +# It is specified in the form. +SECURITY_GROUP_RULES = { + 'all_tcp': { + 'name': 'ALL TCP', + 'ip_protocol': 'tcp', + 'from_port': '1', + 'to_port': '65535', + }, + 'all_udp': { + 'name': 'ALL UDP', + 'ip_protocol': 'udp', + 'from_port': '1', + 'to_port': '65535', + }, + 'all_icmp': { + 'name': 'ALL ICMP', + 'ip_protocol': 'icmp', + 'from_port': '-1', + 'to_port': '-1', + }, + 'ssh': { + 'name': 'SSH', + 'ip_protocol': 'tcp', + 'from_port': '22', + 'to_port': '22', + }, + 'smtp': { + 'name': 'SMTP', + 'ip_protocol': 'tcp', + 'from_port': '25', + 'to_port': '25', + }, + 'dns': { + 'name': 'DNS', + 'ip_protocol': 'tcp', + 'from_port': '53', + 'to_port': '53', + }, + 'http': { + 'name': 'HTTP', + 'ip_protocol': 'tcp', + 'from_port': '80', + 'to_port': '80', + }, + 'pop3': { + 'name': 'POP3', + 'ip_protocol': 'tcp', + 'from_port': '110', + 'to_port': '110', + }, + 'imap': { + 'name': 'IMAP', + 'ip_protocol': 'tcp', + 'from_port': '143', + 'to_port': '143', + }, + 'ldap': { + 'name': 'LDAP', + 'ip_protocol': 'tcp', + 'from_port': '389', + 'to_port': '389', + }, + 'https': { + 'name': 'HTTPS', + 'ip_protocol': 'tcp', + 'from_port': '443', + 'to_port': '443', + }, + 'smtps': { + 'name': 'SMTPS', + 'ip_protocol': 'tcp', + 'from_port': '465', + 'to_port': '465', + }, + 'imaps': { + 'name': 'IMAPS', + 'ip_protocol': 'tcp', + 'from_port': '993', + 'to_port': '993', + }, + 'pop3s': { + 'name': 'POP3S', + 'ip_protocol': 'tcp', + 'from_port': '995', + 'to_port': '995', + }, + 'ms_sql': { + 'name': 'MS SQL', + 'ip_protocol': 'tcp', + 'from_port': '1433', + 'to_port': '1433', + }, + 'mysql': { + 'name': 'MYSQL', + 'ip_protocol': 'tcp', + 'from_port': '3306', + 'to_port': '3306', + }, + 'rdp': { + 'name': 'RDP', + 'ip_protocol': 'tcp', + 'from_port': '3389', + 'to_port': '3389', + }, +} + +FLAVOR_EXTRA_KEYS = { + 'flavor_keys': [ + ('quota:read_bytes_sec', _('Quota: Read bytes')), + ('quota:write_bytes_sec', _('Quota: Write 
bytes')), + ('quota:cpu_quota', _('Quota: CPU')), + ('quota:cpu_period', _('Quota: CPU period')), + ('quota:inbound_average', _('Quota: Inbound average')), + ('quota:outbound_average', _('Quota: Outbound average')), + ] +} diff --git a/tools/ansible-openstack/templates/etc/rabbitmq/rabbitmq-env.conf b/tools/ansible-openstack/templates/etc/rabbitmq/rabbitmq-env.conf new file mode 100644 index 0000000..65ed733 --- /dev/null +++ b/tools/ansible-openstack/templates/etc/rabbitmq/rabbitmq-env.conf @@ -0,0 +1,2 @@ +RABBITMQ_SERVER_ERL_ARGS="+K true +A30 +P 1048576" + diff --git a/tools/ansible-openstack/templates/etc/rabbitmq/rabbitmq.config b/tools/ansible-openstack/templates/etc/rabbitmq/rabbitmq.config new file mode 100644 index 0000000..945bfa4 --- /dev/null +++ b/tools/ansible-openstack/templates/etc/rabbitmq/rabbitmq.config @@ -0,0 +1,3 @@ +[ + {kernel,[{inet_dist_listen_min, {{ amqp_erlang_port }} },{inet_dist_listen_max, {{ amqp_erlang_port }} }]} +]. diff --git a/tools/ansible-openstack/templates/etc/sysconfig/bridge.modules b/tools/ansible-openstack/templates/etc/sysconfig/bridge.modules new file mode 100644 index 0000000..e242da4 --- /dev/null +++ b/tools/ansible-openstack/templates/etc/sysconfig/bridge.modules @@ -0,0 +1,4 @@ +#!/bin/bash + +/sbin/modprobe bridge > /dev/null 2>&1 + diff --git a/tools/ansible-openstack/templates/etc/sysconfig/memcached b/tools/ansible-openstack/templates/etc/sysconfig/memcached new file mode 100644 index 0000000..3399623 --- /dev/null +++ b/tools/ansible-openstack/templates/etc/sysconfig/memcached @@ -0,0 +1,5 @@ +PORT="11211" +USER="memcached" +MAXCONN="1024" +CACHESIZE="1024" +OPTIONS="-l {{ my_int_ip }} " diff --git a/tools/ansible-openstack/templates/etc/yum.repos.d/CentOS-Base.repo b/tools/ansible-openstack/templates/etc/yum.repos.d/CentOS-Base.repo new file mode 100644 index 0000000..55d3609 --- /dev/null +++ b/tools/ansible-openstack/templates/etc/yum.repos.d/CentOS-Base.repo @@ -0,0 +1,46 @@ +[base] +name=CentOS-$releasever - Base +#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=os +#baseurl=http://mirror.centos.org/centos/$releasever/os/$basearch/ +baseurl=http://192.168.100.253/norarepo/centos/6/os/$basearch/ +gpgcheck=0 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6 + +#released updates +[updates] +name=CentOS-$releasever - Updates +#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=updates +#baseurl=http://mirror.centos.org/centos/$releasever/updates/$basearch/ +baseurl=http://192.168.100.253/norarepo/centos/6/updates/$basearch/ +gpgcheck=0 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6 + +#additional packages that may be useful +[extras] +name=CentOS-$releasever - Extras +#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=extras +#baseurl=http://mirror.centos.org/centos/$releasever/extras/$basearch/ +baseurl=http://192.168.100.253/norarepo/centos/6/extras/$basearch/ +gpgcheck=0 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6 + +#additional packages that extend functionality of existing packages +[centosplus] +name=CentOS-$releasever - Plus +#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=centosplus +#baseurl=http://mirror.centos.org/centos/$releasever/centosplus/$basearch/ +baseurl=http://192.168.100.253/norarepo/centos/6/centosplus/$basearch/ +gpgcheck=0 +enabled=0 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6 + +#contrib - packages by Centos Users +[contrib] +name=CentOS-$releasever - 
Contrib +#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=contrib +#baseurl=http://mirror.centos.org/centos/$releasever/contrib/$basearch/ +baseurl=http://192.168.100.253/norarepo/centos/6/contrib/$basearch/ +gpgcheck=0 +enabled=0 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6 + diff --git a/tools/ansible-openstack/templates/etc/yum.repos.d/epel-testing.repo b/tools/ansible-openstack/templates/etc/yum.repos.d/epel-testing.repo new file mode 100644 index 0000000..637a34e --- /dev/null +++ b/tools/ansible-openstack/templates/etc/yum.repos.d/epel-testing.repo @@ -0,0 +1,28 @@ +[epel-testing] +name=Extra Packages for Enterprise Linux 6 - Testing - $basearch +#baseurl=http://download.fedoraproject.org/pub/epel/testing/6/$basearch +#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=testing-epel6&arch=$basearch +baseurl=http://192.168.100.253/norarepo/epel/testing/6/$basearch +failovermethod=priority +enabled=0 +gpgcheck=0 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6 + +[epel-testing-debuginfo] +name=Extra Packages for Enterprise Linux 6 - Testing - $basearch - Debug +#baseurl=http://download.fedoraproject.org/pub/epel/testing/6/$basearch/debug +mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=testing-debug-epel6&arch=$basearch +failovermethod=priority +enabled=0 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6 +gpgcheck=1 + +[epel-testing-source] +name=Extra Packages for Enterprise Linux 6 - Testing - $basearch - Source +#baseurl=http://download.fedoraproject.org/pub/epel/testing/6/SRPMS +mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=testing-source-epel6&arch=$basearch +failovermethod=priority +enabled=0 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6 +gpgcheck=1 + diff --git a/tools/ansible-openstack/templates/etc/yum.repos.d/epel.repo b/tools/ansible-openstack/templates/etc/yum.repos.d/epel.repo new file mode 100644 index 0000000..6e2e919 --- /dev/null +++ b/tools/ansible-openstack/templates/etc/yum.repos.d/epel.repo @@ -0,0 +1,27 @@ +[epel] +name=Extra Packages for Enterprise Linux 6 - $basearch +#baseurl=http://download.fedoraproject.org/pub/epel/6/$basearch +#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch +baseurl=http://192.168.100.253/norarepo/epel/6/$basearch +failovermethod=priority +enabled=1 +gpgcheck=0 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6 + +[epel-debuginfo] +name=Extra Packages for Enterprise Linux 6 - $basearch - Debug +#baseurl=http://download.fedoraproject.org/pub/epel/6/$basearch/debug +mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-debug-6&arch=$basearch +failovermethod=priority +enabled=0 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6 +gpgcheck=1 + +[epel-source] +name=Extra Packages for Enterprise Linux 6 - $basearch - Source +#baseurl=http://download.fedoraproject.org/pub/epel/6/SRPMS +mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-source-6&arch=$basearch +failovermethod=priority +enabled=0 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6 +gpgcheck=1 \ No newline at end of file diff --git a/tools/ansible-openstack/templates/etc/yum.repos.d/rdo-release.repo b/tools/ansible-openstack/templates/etc/yum.repos.d/rdo-release.repo new file mode 100644 index 0000000..e77ddf2 --- /dev/null +++ b/tools/ansible-openstack/templates/etc/yum.repos.d/rdo-release.repo @@ -0,0 +1,10 @@ +[openstack-icehouse] +name=OpenStack Icehouse Repository +#baseurl=http://repos.fedorapeople.org/repos/openstack/openstack-icehouse/epel-6/ 
+baseurl=http://192.168.100.253/norarepo/openstack/openstack-icehouse/epel-6/ +enabled=1 +skip_if_unavailable=0 +gpgcheck=0 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-RDO-Icehouse +priority=98 + diff --git a/tools/ansible-openstack/templates/root/.my.cnf b/tools/ansible-openstack/templates/root/.my.cnf new file mode 100644 index 0000000..f3d989b --- /dev/null +++ b/tools/ansible-openstack/templates/root/.my.cnf @@ -0,0 +1,5 @@ +[client] +user=root +password={{ root_db_password }} +host=localhost + diff --git a/tools/ansible-openstack/templates/root/keystonerc_admin b/tools/ansible-openstack/templates/root/keystonerc_admin new file mode 100644 index 0000000..5fa95cf --- /dev/null +++ b/tools/ansible-openstack/templates/root/keystonerc_admin @@ -0,0 +1,7 @@ +export OS_USERNAME={{ admin_user }} +export OS_TENANT_NAME={{ admin_tenant }} +export OS_PASSWORD={{ admin_password }} +export OS_AUTH_URL=http://{{frontend_int_ip}}:35357/v2.0 +export PS1="${PS1}[] " + + diff --git a/tools/ansible-openstack/templates/root/openrc b/tools/ansible-openstack/templates/root/openrc new file mode 100644 index 0000000..0befe52 --- /dev/null +++ b/tools/ansible-openstack/templates/root/openrc @@ -0,0 +1,12 @@ +if [ "$1" == "" -o "$2" == "" ]; then + echo "usage: $ source openstackrc <> <>" +else + export OS_TENANT_NAME=$1 + export OS_USERNAME=$2 + export OS_PASSWORD=$2 + export OS_AUTH_URL=http://{{frontend_ext_ip}}:5000/v2.0 + export PS1="${PS1}[( $OS_TENANT_NAME - \$OS_USERNAME )] $ " +fi + + + diff --git a/tools/rack_client.py b/tools/rack_client.py new file mode 100755 index 0000000..7f431c4 --- /dev/null +++ b/tools/rack_client.py @@ -0,0 +1,436 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#!/usr/bin/env python + +import argparse +import json +import os +import requests +import sys + +from keystoneclient.v2_0 import client as keystone_client + + +def get_parser(): + parser = argparse.ArgumentParser(description="rack client") + parser.add_argument("--noauth", action="store_true", help="request without auth") + parser.add_argument("--os-username", default=os.getenv("OS_USERNAME"), help="") + parser.add_argument("--os-password", default=os.getenv("OS_PASSWORD"), help="") + parser.add_argument("--os-tenant_name", default=os.getenv("OS_TENANT_NAME"), help="") + parser.add_argument("--os-auth_url", default=os.getenv("OS_AUTH_URL"), help="") + + parser.add_argument("--debug", action="store_true", help="print debugging output") + parser.add_argument("--url", default="http://localhost:8088/v1/", help="rack-api endpoint") + subparsers = parser.add_subparsers(help="commands") + + #group list + group_list_parser = subparsers.add_parser("group-list", help="list groups") + group_list_parser.set_defaults(func=group_list) + + #group show + group_show_parser = subparsers.add_parser("group-show", help="list group details") + group_show_parser.add_argument("--gid", action="store", required=True) + group_show_parser.set_defaults(func=group_show) + + #group create + group_create_parser = subparsers.add_parser("group-create", help="create group") + group_create_parser.add_argument("--name", action="store") + group_create_parser.add_argument("--description", action="store") + group_create_parser.set_defaults(func=group_create) + + #group update + group_update_parser = subparsers.add_parser("group-update", help="update group") + group_update_parser.add_argument("--gid", action="store", required=True) + group_update_parser.add_argument("--name", action="store") + group_update_parser.add_argument("--description", action="store") + group_update_parser.set_defaults(func=group_update) + + #group delete + group_delete_parser = subparsers.add_parser("group-delete", help="delete group") + group_delete_parser.add_argument("--gid", action="store", required=True) + group_delete_parser.set_defaults(func=group_delete) + + #keypair list + keypair_list_parser = subparsers.add_parser("keypair-list", help="list keypairs") + keypair_list_parser.add_argument("--gid", action="store", required=True) + keypair_list_parser.set_defaults(func=keypair_list) + + #keypair show + keypair_show_parser = subparsers.add_parser("keypair-show", help="list keypair details") + keypair_show_parser.add_argument("--gid", action="store", required=True) + keypair_show_parser.add_argument("--keypair_id", action="store", required=True) + keypair_show_parser.set_defaults(func=keypair_show) + + #keypair create + keypair_create_parser = subparsers.add_parser("keypair-create", help="create keypair") + keypair_create_parser.add_argument("--gid", action="store", required=True) + keypair_create_parser.add_argument("--name", action="store") + keypair_create_parser.add_argument("--is_default", action="store") + keypair_create_parser.set_defaults(func=keypair_create) + + #keypair update + keypair_update_parser = subparsers.add_parser("keypair-update", help="update keypair") + keypair_update_parser.add_argument("--gid", action="store", required=True) + keypair_update_parser.add_argument("--keypair_id", action="store", required=True) + keypair_update_parser.add_argument("--is_default", action="store", required=True) + keypair_update_parser.set_defaults(func=keypair_update) + + #keypair delete + keypair_delete_parser =
subparsers.add_parser("keypair-delete", help="delete keypair") + keypair_delete_parser.add_argument("--gid", action="store", required=True) + keypair_delete_parser.add_argument("--keypair_id", action="store", required=True) + keypair_delete_parser.set_defaults(func=keypair_delete) + + #securitygroup list + securitygroup_list_parser = subparsers.add_parser("securitygroup-list", help="list securitygroups") + securitygroup_list_parser.add_argument("--gid", action="store", required=True) + securitygroup_list_parser.set_defaults(func=securitygroup_list) + + #securitygroup show + securitygroup_show_parser = subparsers.add_parser("securitygroup-show", help="list securitygroup details") + securitygroup_show_parser.add_argument("--gid", action="store", required=True) + securitygroup_show_parser.add_argument("--securitygroup_id", action="store", required=True) + securitygroup_show_parser.set_defaults(func=securitygroup_show) + + #securitygroup create + securitygroup_create_parser = subparsers.add_parser("securitygroup-create", help="create securitygroup") + securitygroup_create_parser.add_argument("--gid", action="store", required=True) + securitygroup_create_parser.add_argument("--name", action="store") + securitygroup_create_parser.add_argument("--is_default", action="store") + securitygroup_create_parser.add_argument("--securitygrouprules", metavar="key1=value1[,key2=value2...]", action="store", nargs="+", default=[]) + securitygroup_create_parser.set_defaults(func=securitygroup_create) + + #securitygroup update + securitygroup_update_parser = subparsers.add_parser("securitygroup-update", help="update securitygroup") + securitygroup_update_parser.add_argument("--gid", action="store", required=True) + securitygroup_update_parser.add_argument("--securitygroup_id", action="store", required=True) + securitygroup_update_parser.add_argument("--is_default", action="store", required=True) + securitygroup_update_parser.set_defaults(func=securitygroup_update) + + #securitygroup delete + securitygroup_delete_parser = subparsers.add_parser("securitygroup-delete", help="delete securitygroup") + securitygroup_delete_parser.add_argument("--gid", action="store", required=True) + securitygroup_delete_parser.add_argument("--securitygroup_id", action="store", required=True) + securitygroup_delete_parser.set_defaults(func=securitygroup_delete) + + #network list + network_list_parser = subparsers.add_parser("network-list", help="list networks") + network_list_parser.add_argument("--gid", action="store", required=True) + network_list_parser.set_defaults(func=network_list) + + #network show + network_show_parser = subparsers.add_parser("network-show", help="list network details") + network_show_parser.add_argument("--gid", action="store", required=True) + network_show_parser.add_argument("--network_id", action="store", required=True) + network_show_parser.set_defaults(func=network_show) + + #network create + network_create_parser = subparsers.add_parser("network-create", help="create network") + network_create_parser.add_argument("--gid", action="store", required=True) + network_create_parser.add_argument("--name", action="store") + network_create_parser.add_argument("--cidr", action="store", required=True) + network_create_parser.add_argument("--is_admin", action="store") + network_create_parser.add_argument("--gateway", action="store") + network_create_parser.add_argument("--dns_nameservers", action="store", nargs="+") + network_create_parser.add_argument("--ext_router_id", action="store") + 
network_create_parser.set_defaults(func=network_create) + + #network update + network_update_parser = subparsers.add_parser("network-update", help="update network") + network_update_parser.add_argument("--gid", action="store", required=True) + network_update_parser.add_argument("--network_id", action="store", required=True) + network_update_parser.add_argument("--is_admin", action="store", required=True) + network_update_parser.set_defaults(func=network_update) + + #network delete + network_delete_parser = subparsers.add_parser("network-delete", help="delete network") + network_delete_parser.add_argument("--gid", action="store", required=True) + network_delete_parser.add_argument("--network_id", action="store", required=True) + network_delete_parser.set_defaults(func=network_delete) + + #process list + process_list_parser = subparsers.add_parser("process-list", help="list processes") + process_list_parser.add_argument("--gid", action="store", required=True) + process_list_parser.set_defaults(func=process_list) + + #process show + process_show_parser = subparsers.add_parser("process-show", help="list process details") + process_show_parser.add_argument("--gid", action="store", required=True) + process_show_parser.add_argument("--pid", action="store", required=True) + process_show_parser.set_defaults(func=process_show) + + #process create + process_create_parser = subparsers.add_parser("process-create", help="create process") + process_create_parser.add_argument("--gid", action="store", required=True) + process_create_parser.add_argument("--name", action="store") + process_create_parser.add_argument("--ppid", action="store") + process_create_parser.add_argument("--nova_flavor_id", action="store") + process_create_parser.add_argument("--glance_image_id", action="store") + process_create_parser.add_argument("--keypair_id", action="store") + process_create_parser.add_argument("--securitygroup_ids", action="store", nargs="+") + process_create_parser.add_argument("--metadata", metavar="key1=value1[,key2=value2...]", action="store") + process_create_parser.set_defaults(func=process_create) + + #process delete + process_delete_parser = subparsers.add_parser("process-delete", help="delete process") + process_delete_parser.add_argument("--gid", action="store", required=True) + process_delete_parser.add_argument("--pid", action="store", required=True) + process_delete_parser.set_defaults(func=process_delete) + + return parser + + +def group_list(args, headers): + url = args.url + "groups" + return requests.get(url, headers=headers) + + +def group_show(args, headers): + url = args.url + "groups/" + args.gid + return requests.get(url, headers=headers) + + +def group_create(args, headers): + url = args.url + "groups" + + payload = {} + if not args.name: + sys.exit("name is required.") + payload["name"] = args.name + if args.description: + payload["description"] = args.description + data = json.dumps(dict(group=payload)) + + return requests.post(url, data=data, headers=headers) + + +def group_update(args, headers): + url = args.url + "groups/" + args.gid + + payload = {} + if args.name: + payload["name"] = args.name + if args.description: + payload["description"] = args.description + if not payload: + sys.exit("No attribute is provided.") + data = json.dumps(dict(group=payload)) + + return requests.put(url, data=data, headers=headers) + + +def group_delete(args, headers): + url = args.url + "groups/" + args.gid + return requests.delete(url, headers=headers) + + +def keypair_list(args, headers): + url = 
args.url + "groups/" + args.gid + "/keypairs" + return requests.get(url, headers=headers) + + +def keypair_show(args, headers): + url = args.url + "groups/" + args.gid + "/keypairs/" + args.keypair_id + return requests.get(url, headers=headers) + + +def keypair_create(args, headers): + url = args.url + "groups/" + args.gid + "/keypairs" + + payload = {} + if args.name: + payload["name"] = args.name + if args.is_default: + payload["is_default"] = args.is_default + data = json.dumps(dict(keypair=payload)) + + return requests.post(url, data=data, headers=headers) + + +def keypair_update(args, headers): + url = args.url + "groups/" + args.gid + "/keypairs/" + args.keypair_id + + payload = {} + payload["is_default"] = args.is_default + data = json.dumps(dict(keypair=payload)) + + return requests.put(url, data=data, headers=headers) + + +def keypair_delete(args, headers): + url = args.url + "groups/" + args.gid + "/keypairs/" + args.keypair_id + return requests.delete(url, headers=headers) + + +def securitygroup_list(args, headers): + url = args.url + "groups/" + args.gid + "/securitygroups" + return requests.get(url, headers=headers) + + +def securitygroup_show(args, headers): + url = args.url + "groups/" + args.gid + "/securitygroups/" + args.securitygroup_id + return requests.get(url, headers=headers) + + +def securitygroup_create(args, headers): + url = args.url + "groups/" + args.gid + "/securitygroups" + + payload = {} + if args.name: + payload["name"] = args.name + if args.is_default: + payload["is_default"] = args.is_default + securitygrouprules = [] + for rule_params in args.securitygrouprules: + rule_dict = dict(v.split("=") for v in rule_params.split(",")) + securitygrouprules.append(rule_dict) + payload["securitygrouprules"] = securitygrouprules + data = json.dumps(dict(securitygroup=payload)) + + return requests.post(url, data=data, headers=headers) + + +def securitygroup_update(args, headers): + url = args.url + "groups/" + args.gid + "/securitygroups/" + args.securitygroup_id + + payload = {} + payload["is_default"] = args.is_default + data = json.dumps(dict(securitygroup=payload)) + + return requests.put(url, data=data, headers=headers) + + +def securitygroup_delete(args, headers): + url = args.url + "groups/" + args.gid + "/securitygroups/" + args.securitygroup_id + return requests.delete(url, headers=headers) + + +def network_list(args, headers): + url = args.url + "groups/" + args.gid + "/networks" + return requests.get(url, headers=headers) + + +def network_show(args, headers): + url = args.url + "groups/" + args.gid + "/networks/" + args.network_id + return requests.get(url, headers=headers) + + +def network_create(args, headers): + url = args.url + "groups/" + args.gid + "/networks" + + payload = {} + if args.name: + payload["name"] = args.name + if args.cidr: + payload["cidr"] = args.cidr + if args.is_admin: + payload["is_admin"] = args.is_admin + if args.gateway: + payload["gateway"] = args.gateway + if args.dns_nameservers: + payload["dns_nameservers"] = args.dns_nameservers + if args.ext_router_id: + payload["ext_router_id"] = args.ext_router_id + data = json.dumps(dict(network=payload)) + + return requests.post(url, data=data, headers=headers) + + +def network_update(args, headers): + url = args.url + "groups/" + args.gid + "/networks/" + args.network_id + + payload = {} + payload["is_admin"] = args.is_admin + data = json.dumps(dict(network=payload)) + + return requests.put(url, data=data, headers=headers) + + +def network_delete(args, headers): + url = args.url + 
"groups/" + args.gid + "/networks/" + args.network_id + return requests.delete(url, headers=headers) + + +def process_list(args, headers): + url = args.url + "groups/" + args.gid + "/processes" + return requests.get(url, headers=headers) + + +def process_show(args, headers): + url = args.url + "groups/" + args.gid + "/processes/" + args.pid + return requests.get(url, headers=headers) + + +def process_create(args, headers): + url = args.url + "groups/" + args.gid + "/processes" + + payload = {} + if args.name: + payload["name"] = args.name + if args.ppid: + payload["ppid"] = args.ppid + if args.keypair_id: + payload["keypair_id"] = args.keypair_id + if args.metadata: + payload["metadata"] = dict(v.split("=", 1) for v in args.metadata.split(",")) + if args.nova_flavor_id: + payload["nova_flavor_id"] = args.nova_flavor_id + if args.glance_image_id: + payload["glance_image_id"] = args.glance_image_id + if args.securitygroup_ids: + payload["securitygroup_ids"] = args.securitygroup_ids + data = json.dumps(dict(process=payload)) + + return requests.post(url, data=data, headers=headers) + + +def process_delete(args, headers): + url = args.url + "groups/" + args.gid + "/processes/" + args.pid + return requests.delete(url, headers=headers) + + +def main(): + parser = get_parser() + args = parser.parse_args() + + if args.noauth: + if not args.os_username or not args.os_tenant_name: + sys.exit("You must provide --os-username or env[OS_USERNAME], --os-tenant_name or env[OS_TENANT_NAME]") + token = ":".join([args.os_username, args.os_tenant_name]) + else: + if not args.os_username or not args.os_password\ + or not args.os_tenant_name or not args.os_auth_url: + sys.exit("You must provide --os-username or env[OS_USERNAME], " + "--os-password or env[OS_PASSWORD], --os-tenant_name or env[OS_TENANT_NAME], " + "and --os-auth_url or env[OS_AUTH_URL]") + + keystone = keystone_client.Client( + username=args.os_username, + password=args.os_password, + auth_url=args.os_auth_url, + tenant_name=args.os_tenant_name) + token = keystone.auth_token + + headers = {"content-type": "application/json", + "accept": "application/json", + "X-Auth-Token": token} + res = args.func(args, headers) + + print "HTTP STATUS: " + str(res.status_code) + print json.dumps(res.json(), indent=4) + + +if __name__ == "__main__": + main() diff --git a/tools/simple_integration_test.py b/tools/simple_integration_test.py new file mode 100644 index 0000000..725bb88 --- /dev/null +++ b/tools/simple_integration_test.py @@ -0,0 +1,599 @@ +# Copyright (c) 2014 ITOCHU Techno-Solutions Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import rack_client +import testtools +import time +import logging +from novaclient.v1_1 import client as nova_client +from neutronclient.v2_0 import client as neutron_client + +LOG = logging.getLogger(__name__) +LOG.setLevel(logging.DEBUG) +shandler = logging.StreamHandler() +shandler.setLevel(logging.DEBUG) +formatter = logging.Formatter("%(levelname)s %(message)s %(filename)s:%(lineno)s") +shandler.setFormatter(formatter) +LOG.addHandler(shandler) + +class RackTestCase(testtools.TestCase): + RETRY = 30 + test_user = "integrationtest" + keystone_endpoint = "http://localhost:5000/v2.0" + + def setUp(self): + super(RackTestCase, self).setUp() + + def tearDown(self): + super(RackTestCase, self).tearDown() + LOG.debug("Cleanup OpenStack resources") + nova = self.get_nova_client() + neutron = self.get_neutron_client() + #clean servers + servers = nova.servers.list() + for server in servers: + LOG.debug("Delete nova instance: %s", server.id) + nova.servers.delete(server.id) + retry = 0 + while retry < self.RETRY: + servers = nova.servers.list() + if len(servers) == 0: + break + retry += 1 + #clean networks + ports = neutron.list_ports() + networks = neutron.list_networks() + for port in ports["ports"]: + router_id = None + if port["device_owner"] == "network:router_interface": + router_id = port["device_id"] + if router_id: + neutron.remove_interface_router(router_id, {"port_id": port["id"]}) + for network in networks["networks"]: + if network["name"] != "public": + LOG.debug("Delete network: %s", network["id"]) + neutron.delete_network(network["id"]) + #clean securitygroups + securitygroups = neutron.list_security_groups()["security_groups"] + for i in securitygroups: + if i["name"] != "default": + LOG.debug("Delete security group: %s", i["id"]) + neutron.delete_security_group(i["id"]) + #clean keypairs + keypairs = nova.keypairs.list() + for i in keypairs: + LOG.debug("Delete keypair: %s", i.id) + nova.keypairs.delete(i.id) + + def get_nova_client(self): + credentials = { + "username": self.test_user, + "api_key": self.test_user, + "project_id": self.test_user, + "auth_url": self.keystone_endpoint + } + return nova_client.Client(**credentials) + + def get_neutron_client(self): + credentials = { + "username": self.test_user, + "password": self.test_user, + "tenant_name": self.test_user, + "auth_url": self.keystone_endpoint + } + return neutron_client.Client(**credentials) + + def _do_request(self, arg_list, func_name): + headers = { + "content-type": "application/json", + "accept": "application/json", + "X-Auth-Token": ":".join([self.test_user, self.test_user]) + } + parser = rack_client.get_parser() + + LOG.debug(" ".join(arg_list)) + args = parser.parse_args(arg_list) + func = getattr(rack_client, func_name) + res = func(args, headers) + if func_name.split("_")[1] != "delete": + LOG.debug(res.json()) + + return res + + def _wait_for_active(self, arg_list, show_func_name): + retry = 0 + while retry < self.RETRY: + res = self._do_request(arg_list, show_func_name) + self.assertEquals(200, res.status_code) + ref = res.json().values()[0] + if ref["status"] == "ACTIVE": + break + elif ref["status"] == "ERROR": + self.fail("status ERROR") + time.sleep(2) + retry += 1 + if retry == self.RETRY: + self.fail("Timeout for waiting") + + def _group_list(self): + arg_list = ["group-list"] + return self._do_request(arg_list, "group_list") + + def _group_create(self, name, description=None): + group_create = ["group-create", "--name", name] + if description: + group_create.extend(["--description", 
description]) + return self._do_request(group_create, "group_create") + + def _group_delete(self, gid): + arg_list = ["group-delete", "--gid", gid] + return self._do_request(arg_list, "group_delete") + + def _keypair_list(self, gid): + arg_list = ["keypair-list", "--gid", gid] + return self._do_request(arg_list, "keypair_list") + + def _keypair_create(self, gid, name=None, is_default=None): + arg_list = ["keypair-create", "--gid", gid] + if name: + arg_list.extend(["--name", name]) + if is_default: + arg_list.extend(["--is_default", is_default]) + return self._do_request(arg_list, "keypair_create") + + def _keypair_delete(self, gid, keypair_id): + arg_list = ["keypair-delete", "--gid", gid, "--keypair_id", keypair_id] + return self._do_request(arg_list, "keypair_delete") + + def _securitygroup_list(self, gid): + arg_list = ["securitygroup-list", "--gid", gid] + return self._do_request(arg_list, "securitygroup_list") + + def _securitygroup_create(self, gid, name=None, is_default=None, rules=None): + arg_list = ["securitygroup-create", "--gid", gid] + if name: + arg_list.extend(["--name", name]) + if is_default: + arg_list.extend(["--is_default", is_default]) + if rules: + arg_list.append("--securitygrouprules") + arg_list.extend(rules) + return self._do_request(arg_list, "securitygroup_create") + + def _securitygroup_delete(self, gid, securitygroup_id): + arg_list = ["securitygroup-delete", "--gid", gid, "--securitygroup_id", securitygroup_id] + return self._do_request(arg_list, "securitygroup_delete") + + def _network_list(self, gid): + arg_list = ["network-list", "--gid", gid] + return self._do_request(arg_list, "network_list") + + def _network_create(self, gid, cidr, name=None, gateway=None, dns_nameservers=[], ext_router_id=None): + arg_list = ["network-create", "--gid", gid, "--cidr", cidr] + if name: + arg_list.extend(["--name", name]) + if gateway: + arg_list.extend(["--gateway", gateway]) + if dns_nameservers: + arg_list.append("--dns_nameservers") + arg_list.extend(dns_nameservers) + if ext_router_id: + arg_list.extend(["--ext_router_id", ext_router_id]) + return self._do_request(arg_list, "network_create") + + def _network_delete(self, gid, network_id): + arg_list = ["network-delete", "--gid", gid, "--network_id", network_id] + return self._do_request(arg_list, "network_delete") + + def _process_list(self, gid): + arg_list = ["process-list", "--gid", gid] + return self._do_request(arg_list, "process_list") + + def _process_create(self, gid, nova_flavor_id=None, glance_image_id=None, securitygroup_ids=None, ppid=None, name=None, keypair_id=None, metadata=None): + arg_list = ["process-create", "--gid", gid] + if securitygroup_ids: + arg_list.append("--securitygroup_ids") + arg_list.extend(securitygroup_ids) + if nova_flavor_id: + arg_list.extend(["--nova_flavor_id", nova_flavor_id]) + if glance_image_id: + arg_list.extend(["--glance_image_id", glance_image_id]) + if ppid: + arg_list.extend(["--ppid", ppid]) + if name: + arg_list.extend(["--name", name]) + if keypair_id: + arg_list.extend(["--keypair_id", keypair_id]) + if metadata: + arg_list.extend(["--metadata", metadata]) + return self._do_request(arg_list, "process_create") + + def _process_delete(self, gid, pid): + arg_list = ["process-delete", "--gid", gid, "--pid", pid] + return self._do_request(arg_list, "process_delete") + +#### test cases #### + + def test_group(self): + name = "group1" + description = "This is group1" + + #create + res = self._group_create(name, description) + self.assertEqual(201, res.status_code) + 
group_ref1 = res.json()["group"] + self.assertEquals(name, group_ref1["name"]) + self.assertEquals(description, group_ref1["description"]) + + res = self._group_create("group2") + group_ref2 = res.json()["group"] + self.assertEqual(201, res.status_code) + + #list + res = self._group_list() + self.assertEquals(200, res.status_code) + group_refs = res.json()["groups"] + self.assertEquals(2, len(group_refs)) + + #show + arg_list = ["group-show", "--gid", group_ref1["gid"]] + res = self._do_request(arg_list, "group_show") + self.assertEquals(200, res.status_code) + group_ref = res.json()["group"] + self.assertEquals(group_ref1, group_ref) + + #update + arg_list = ["group-update", "--gid", group_ref1["gid"], "--name", "group1-2", "--description", "Group name changed"] + res = self._do_request(arg_list, "group_update") + self.assertEquals(200, res.status_code) + group_ref = res.json()["group"] + self.assertEquals("group1-2", group_ref["name"]) + self.assertEquals("Group name changed", group_ref["description"]) + + #delete + res = self._group_delete(group_ref1["gid"]) + self.assertEquals(204, res.status_code) + res = self._group_delete(group_ref2["gid"]) + self.assertEquals(204, res.status_code) + res = self._group_delete(group_ref1["gid"]) + self.assertEquals(404, res.status_code) + + def test_keypair(self): + res = self._group_create("group") + group_ref = res.json()["group"] + gid = group_ref["gid"] + + #create + name = "keypair1" + is_default = "true" + res = self._keypair_create(gid=gid, name=name, is_default=is_default) + self.assertEqual(202, res.status_code) + keypair_ref1 = res.json()["keypair"] + self.assertEqual(name, keypair_ref1["name"]) + self.assertEqual(True, keypair_ref1["is_default"]) + arg_list = ["keypair-show", "--gid", gid, "--keypair_id", keypair_ref1["keypair_id"]] + self._wait_for_active(arg_list, "keypair_show") + + res = self._keypair_create(gid) + self.assertEqual(202, res.status_code) + keypair_ref2 = res.json()["keypair"] + arg_list = ["keypair-show", "--gid", gid, "--keypair_id", keypair_ref2["keypair_id"]] + self._wait_for_active(arg_list, "keypair_show") + + #list + res = self._keypair_list(gid) + self.assertEqual(200, res.status_code) + keypair_refs = res.json()["keypairs"] + self.assertEquals(2, len(keypair_refs)) + + #update + arg_list = ["keypair-update", "--gid", gid, "--keypair_id", keypair_ref2["keypair_id"], "--is_default", "true"] + res = self._do_request(arg_list, "keypair_update") + self.assertEquals(200, res.status_code) + keypair_ref = res.json()["keypair"] + self.assertEquals(True, keypair_ref["is_default"]) + + #delete + res = self._keypair_delete(gid, keypair_ref1["keypair_id"]) + self.assertEquals(204, res.status_code) + res = self._keypair_delete(gid, keypair_ref2["keypair_id"]) + self.assertEquals(204, res.status_code) + res = self._keypair_delete(gid, keypair_ref1["keypair_id"]) + self.assertEquals(404, res.status_code) + + res = self._keypair_list(gid) + self.assertEquals(200, res.status_code) + keypair_refs = res.json()["keypairs"] + self.assertEquals(0, len(keypair_refs)) + + res = self._group_delete(gid) + self.assertEquals(204, res.status_code) + + def test_securitygroup(self): + res = self._group_create("group") + group_ref = res.json()["group"] + gid = group_ref["gid"] + + #create + name = "securitygroup1" + is_default = "true" + rules = [] + rules.append("protocol=icmp,remote_ip_prefix=10.0.0.0/24") + rules.append("protocol=tcp,port_range_max=1023,port_range_min=1,remote_ip_prefix=10.0.0.0/24") + 
rules.append("protocol=udp,port_range_max=1023,port_range_min=1,remote_ip_prefix=10.0.0.0/24") + res = self._securitygroup_create(gid, name=name, is_default=is_default, rules=rules) + self.assertEqual(202, res.status_code) + securitygroup_ref1 = res.json()["securitygroup"] + self.assertEqual(name, securitygroup_ref1["name"]) + self.assertEqual(True, securitygroup_ref1["is_default"]) + arg_list = ["securitygroup-show", "--gid", gid, "--securitygroup_id", securitygroup_ref1["securitygroup_id"]] + self._wait_for_active(arg_list, "securitygroup_show") + + res = self._securitygroup_create(gid) + self.assertEqual(202, res.status_code) + securitygroup_ref2 = res.json()["securitygroup"] + arg_list = ["securitygroup-show", "--gid", gid, "--securitygroup_id", securitygroup_ref2["securitygroup_id"]] + self._wait_for_active(arg_list, "securitygroup_show") + + #list + res = self._securitygroup_list(gid) + self.assertEqual(200, res.status_code) + securitygroup_refs = res.json()["securitygroups"] + self.assertEquals(2, len(securitygroup_refs)) + + #update + arg_list = ["securitygroup-update", "--gid", gid, "--securitygroup_id", securitygroup_ref2["securitygroup_id"], "--is_default", "true"] + res = self._do_request(arg_list, "securitygroup_update") + self.assertEquals(200, res.status_code) + securitygroup_ref = res.json()["securitygroup"] + self.assertEquals(True, securitygroup_ref["is_default"]) + + #delete + res = self._securitygroup_delete(gid, securitygroup_ref1["securitygroup_id"]) + self.assertEquals(204, res.status_code) + res = self._securitygroup_delete(gid, securitygroup_ref2["securitygroup_id"]) + self.assertEquals(204, res.status_code) + res = self._securitygroup_delete(gid, securitygroup_ref1["securitygroup_id"]) + self.assertEquals(404, res.status_code) + + res = self._securitygroup_list(gid) + self.assertEquals(200, res.status_code) + securitygroup_refs = res.json()["securitygroups"] + self.assertEquals(0, len(securitygroup_refs)) + + res = self._group_delete(gid) + self.assertEquals(204, res.status_code) + + def test_network(self): + res = self._group_create("group") + group_ref = res.json()["group"] + gid = group_ref["gid"] + + #create + cidr1 = "10.0.0.0/24" + name = "network1" + ext_router_id="50c915ab-c128-46bc-b3d0-a464bcdf1acc" + res = self._network_create(gid, cidr1, name=name, gateway="10.0.0.254", dns_nameservers=["8.8.8.8", "8.8.4.4"], ext_router_id=ext_router_id) + self.assertEqual(202, res.status_code) + network_ref1 = res.json()["network"] + self.assertEqual(cidr1, network_ref1["cidr"]) + self.assertEqual(name, network_ref1["name"]) + self.assertEqual(ext_router_id, network_ref1["ext_router_id"]) + arg_list = ["network-show", "--gid", gid, "--network_id", network_ref1["network_id"]] + self._wait_for_active(arg_list, "network_show") + + cidr2 = "10.0.1.0/24" + res = self._network_create(gid, cidr2) + self.assertEqual(202, res.status_code) + network_ref2 = res.json()["network"] + arg_list = ["network-show", "--gid", gid, "--network_id", network_ref2["network_id"]] + self._wait_for_active(arg_list, "network_show") + + #list + res = self._network_list(gid) + self.assertEqual(200, res.status_code) + network_refs = res.json()["networks"] + self.assertEquals(2, len(network_refs)) + + """ + arg_list = ["network-update", "--gid", gid, "--network_id", network_ref1["network_id"], "--is_admin", "true"] + res = self._do_request(arg_list, "network_update") + self.assertEquals(200, res.status_code) + network_ref = res.json()["network"] + self.assertEquals(True, network_ref["is_admin"]) + 
""" + + #delete + res = self._network_delete(gid, network_ref1["network_id"]) + self.assertEquals(204, res.status_code) + res = self._network_delete(gid, network_ref2["network_id"]) + self.assertEquals(204, res.status_code) + res = self._network_delete(gid, network_ref1["network_id"]) + self.assertEquals(404, res.status_code) + + res = self._network_list(gid) + self.assertEquals(200, res.status_code) + network_refs = res.json()["networks"] + self.assertEquals(0, len(network_refs)) + + res = self._group_delete(gid) + self.assertEquals(204, res.status_code) + + def test_process(self): + #create group + res = self._group_create("group") + group_ref = res.json()["group"] + gid = group_ref["gid"] + + #create keypair + res = self._keypair_create(gid) + self.assertEquals(202, res.status_code) + keypair_ref1 = res.json()["keypair"] + arg_list = ["keypair-show", "--gid", gid, "--keypair_id", keypair_ref1["keypair_id"]] + self._wait_for_active(arg_list, "keypair_show") + + res = self._keypair_create(gid) + self.assertEquals(202, res.status_code) + keypair_ref2 = res.json()["keypair"] + arg_list = ["keypair-show", "--gid", gid, "--keypair_id", keypair_ref2["keypair_id"]] + self._wait_for_active(arg_list, "keypair_show") + + #create securitygroups + rules = ["protocol=icmp,remote_ip_prefix=10.0.0.0/24"] + res = self._securitygroup_create(gid, rules=rules) + self.assertEquals(202, res.status_code) + securitygroup_ref1 = res.json()["securitygroup"] + arg_list = ["securitygroup-show", "--gid", gid, "--securitygroup_id", securitygroup_ref1["securitygroup_id"]] + self._wait_for_active(arg_list, "securitygroup_show") + + rules = ["protocol=tcp,port_range_max=1023,port_range_min=1,remote_ip_prefix=10.0.0.0/24"] + res = self._securitygroup_create(gid, rules=rules) + self.assertEquals(202, res.status_code) + securitygroup_ref2 = res.json()["securitygroup"] + arg_list = ["securitygroup-show", "--gid", gid, "--securitygroup_id", securitygroup_ref2["securitygroup_id"]] + self._wait_for_active(arg_list, "securitygroup_show") + + rules = ["protocol=udp,port_range_max=1023,port_range_min=1,remote_ip_prefix=10.0.0.0/24"] + res = self._securitygroup_create(gid, rules=rules) + self.assertEquals(202, res.status_code) + securitygroup_ref3 = res.json()["securitygroup"] + arg_list = ["securitygroup-show", "--gid", gid, "--securitygroup_id", securitygroup_ref3["securitygroup_id"]] + self._wait_for_active(arg_list, "securitygroup_show") + + #create networks + cidr1 = "10.0.0.0/24" + ext_router_id="50c915ab-c128-46bc-b3d0-a464bcdf1acc" + res = self._network_create(gid, cidr1, gateway="10.0.0.254", dns_nameservers=["8.8.8.8", "8.8.4.4"], ext_router_id=ext_router_id) + self.assertEquals(202, res.status_code) + network_ref1 = res.json()["network"] + arg_list = ["network-show", "--gid", gid, "--network_id", network_ref1["network_id"]] + self._wait_for_active(arg_list, "network_show") + + cidr2 = "10.0.1.0/24" + res = self._network_create(gid, cidr2, dns_nameservers=["8.8.8.8", "8.8.4.4"]) + self.assertEquals(202, res.status_code) + network_ref2 = res.json()["network"] + arg_list = ["network-show", "--gid", gid, "--network_id", network_ref2["network_id"]] + self._wait_for_active(arg_list, "network_show") + + #create process + nova_flavor_id = "2" + glance_image_id = "5aea309f-9638-44de-827d-5125ff7e4689" + name = "process1" + keypair_id = keypair_ref1["keypair_id"] + securitygroup_ids = [securitygroup_ref1["securitygroup_id"], securitygroup_ref2["securitygroup_id"]] + metadata = "key1=value1,key2=value2" + res = 
+        # create process
+        nova_flavor_id = "2"
+        glance_image_id = "5aea309f-9638-44de-827d-5125ff7e4689"
+        name = "process1"
+        keypair_id = keypair_ref1["keypair_id"]
+        securitygroup_ids = [securitygroup_ref1["securitygroup_id"], securitygroup_ref2["securitygroup_id"]]
+        metadata = "key1=value1,key2=value2"
+        res = self._process_create(gid, nova_flavor_id, glance_image_id, securitygroup_ids, name=name, keypair_id=keypair_id, metadata=metadata)
+        self.assertEqual(202, res.status_code)
+        process_ref1 = res.json()["process"]
+        self.assertEqual(nova_flavor_id, process_ref1["nova_flavor_id"])
+        self.assertEqual(glance_image_id, process_ref1["glance_image_id"])
+        self.assertEqual(name, process_ref1["name"])
+        self.assertEqual(keypair_id, process_ref1["keypair_id"])
+        self.assertEqual(sorted(securitygroup_ids), sorted(process_ref1["securitygroup_ids"]))
+        arg_list = ["process-show", "--gid", gid, "--pid", process_ref1["pid"]]
+        self._wait_for_active(arg_list, "process_show")
+
+        res = self._process_create(gid, ppid=process_ref1["pid"])
+        self.assertEqual(202, res.status_code)
+        process_ref2 = res.json()["process"]
+        self.assertEqual(process_ref1["pid"], process_ref2["ppid"])
+        arg_list = ["process-show", "--gid", gid, "--pid", process_ref2["pid"]]
+        self._wait_for_active(arg_list, "process_show")
+
+        # list processes
+        res = self._process_list(gid)
+        self.assertEqual(200, res.status_code)
+        process_refs = res.json()["processes"]
+        self.assertEqual(2, len(process_refs))
+
+        # create unused network
+        cidr3 = "10.0.2.0/24"
+        res = self._network_create(gid, cidr3, dns_nameservers=["8.8.8.8", "8.8.4.4"])
+        self.assertEqual(202, res.status_code)
+        network_ref3 = res.json()["network"]
+        arg_list = ["network-show", "--gid", gid, "--network_id", network_ref3["network_id"]]
+        self._wait_for_active(arg_list, "network_show")
+
+        # delete unused keypair
+        res = self._keypair_delete(gid, keypair_ref2["keypair_id"])
+        self.assertEqual(204, res.status_code)
+
+        # delete used keypair
+        res = self._keypair_delete(gid, keypair_ref1["keypair_id"])
+        self.assertEqual(409, res.status_code)
+
+        # delete unused securitygroup
+        res = self._securitygroup_delete(gid, securitygroup_ref3["securitygroup_id"])
+        self.assertEqual(204, res.status_code)
+
+        # delete used securitygroup
+        res = self._securitygroup_delete(gid, securitygroup_ref1["securitygroup_id"])
+        self.assertEqual(409, res.status_code)
+
+        # delete unused network
+        res = self._network_delete(gid, network_ref3["network_id"])
+        self.assertEqual(204, res.status_code)
+
+        # delete used network
+        res = self._network_delete(gid, network_ref1["network_id"])
+        self.assertEqual(409, res.status_code)
+
+        # delete used group
+        res = self._group_delete(gid)
+        self.assertEqual(409, res.status_code)
+
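+        # Delete the parent process first; the subsequent 404 for process2
+        # (created with ppid=process1["pid"]) suggests that deleting a parent
+        # process also removes its child processes.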
+        # delete process
+        res = self._process_delete(gid, process_ref1["pid"])
+        self.assertEqual(204, res.status_code)
+        res = self._process_delete(gid, process_ref2["pid"])
+        self.assertEqual(404, res.status_code)
+        res = self._process_delete(gid, process_ref1["pid"])
+        self.assertEqual(404, res.status_code)
+
+        res = self._process_list(gid)
+        self.assertEqual(200, res.status_code)
+        process_refs = res.json()["processes"]
+        self.assertEqual(0, len(process_refs))
+
+        # delete used group
+        res = self._group_delete(gid)
+        self.assertEqual(409, res.status_code)
+
+        # cleanup network
+        res = self._network_delete(gid, network_ref1["network_id"])
+        self.assertEqual(204, res.status_code)
+        res = self._network_delete(gid, network_ref2["network_id"])
+        self.assertEqual(204, res.status_code)
+
+        # delete used group
+        res = self._group_delete(gid)
+        self.assertEqual(409, res.status_code)
+
+        # cleanup securitygroup
+        res = self._securitygroup_delete(gid, securitygroup_ref1["securitygroup_id"])
+        self.assertEqual(204, res.status_code)
+        res = self._securitygroup_delete(gid, securitygroup_ref2["securitygroup_id"])
+        self.assertEqual(204, res.status_code)
+
+        # delete used group
+        res = self._group_delete(gid)
+        self.assertEqual(409, res.status_code)
+
+        # cleanup keypair
+        res = self._keypair_delete(gid, keypair_ref1["keypair_id"])
+        self.assertEqual(204, res.status_code)
+
+        # delete group
+        res = self._group_delete(gid)
+        self.assertEqual(204, res.status_code)
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..e1086ba
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,30 @@
+[tox]
+minversion = 1.6
+envlist = py26,py27,py33,pep8
+skipsdist = True
+
+[tox:jenkins]
+downloadcache = ~/.pip_cache
+
+[testenv]
+usedevelop = True
+install_command = pip install -U --force-reinstall {opts} {packages}
+setenv = VIRTUAL_ENV={envdir}
+deps =
+  -r{toxinidir}/requirements.txt
+  -r{toxinidir}/test-requirements.txt
+commands =
+  python -m rack.openstack.common.lockutils python setup.py testr --slowest --testr-args='{posargs}'
+
+[testenv:cover]
+setenv = VIRTUAL_ENV={envdir}
+commands =
+  python -m rack.openstack.common.lockutils python setup.py testr --coverage --testr-args='^(?!.*test.*coverage).*$'
+
+[testenv:pep8]
+commands =
+  flake8 {posargs}
+
+[flake8]
+ignore =
+exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,tools