diff --git a/rally-jobs/certifcation_task_args.yaml b/rally-jobs/certifcation_task_args.yaml deleted file mode 100644 index d77d743ed1..0000000000 --- a/rally-jobs/certifcation_task_args.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - service_list: - - authentication - - nova - - neutron - - keystone - - cinder - - glance - use_existing_users: false - image_name: "^(cirros.*-disk|TestVM)$" - flavor_name: "m1.tiny" - glance_image_location: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" - smoke: true - users_amount: 1 - tenants_amount: 1 - controllers_amount: 1 - compute_amount: 1 - storage_amount: 1 - network_amount: 1 - diff --git a/rally-jobs/cinder.yaml b/rally-jobs/cinder.yaml deleted file mode 100755 index 210b589240..0000000000 --- a/rally-jobs/cinder.yaml +++ /dev/null @@ -1,1101 +0,0 @@ -{% set image_name = "^(cirros.*-disk|TestVM)$" %} -{% set flavor_name = "m1.tiny" %} ---- - version: 2 - title: Task for gate-rally-dsvm-rally-cinder job - description: Testing mostly cinder related plugins - subtasks: - - - title: CinderVolumes.create_volume tests - workloads: - - - scenario: - CinderVolumes.create_volume: - size: 1 - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - scenario: - CinderVolumes.create_volume: - size: - min: 1 - max: 2 - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - api_versions: - cinder: - version: 2 - service_name: cinderv2 - - - description: "Create a volume from the specified image." - scenario: - CinderVolumes.create_volume: - size: 1 - image: - name: {{image_name}} - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 1 - users_per_tenant: 1 - - - title: Authenticate.validate_cinder tests - scenario: - Authenticate.validate_cinder: - repetitions: 2 - runner: - constant: - times: 10 - concurrency: 5 - contexts: - users: - tenants: 2 - users_per_tenant: 1 - - - title: Quotas.cinder_update_and_delete tests - scenario: - Quotas.cinder_update_and_delete: - max_quota: 1024 - runner: - constant: - times: 4 - concurrency: 1 - contexts: - users: - tenants: 2 - users_per_tenant: 1 - - - title: Quotas.cinder_update test - scenario: - Quotas.cinder_update: - max_quota: 1024 - runner: - constant: - times: 4 - concurrency: 1 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - title: Quotas.cinder_get tests - scenario: - Quotas.cinder_get: {} - runner: - constant: - times: 6 - concurrency: 3 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - title: CinderVolumes.create_and_delete_volume tests - workloads: - - - scenario: - CinderVolumes.create_and_delete_volume: - size: 1 - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - description: "Create a volume from specific image and delete it." - scenario: - CinderVolumes.create_and_delete_volume: - size: 1 - image: - name: {{image_name}} - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - description: "Create a volume from image created by image context." 
- scenario: - CinderVolumes.create_and_delete_volume: - size: 1 - image: - name: "image-context-test" - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 1 - users_per_tenant: 2 - roles: - - admin - images: - image_url: "~/.rally/extra/fake-image.img" - disk_format: "raw" - container_format: "bare" - images_per_tenant: 1 - image_name: "image-context-test" - visibility: "public" - - - title: CinderVolumes.create_and_update_volume tests - workloads: - - - scenario: - CinderVolumes.create_and_update_volume: - update_volume_kwargs: - description: "desc_updated" - size: 1 - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - description: "Create volume from image and update it." - scenario: - CinderVolumes.create_and_update_volume: - update_volume_kwargs: - description: "desc_updated" - size: 1 - image: - name: {{image_name}} - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - - title: CinderVolumes.create_volume_and_update_readonly_flag tests - workloads: - - - scenario: - CinderVolumes.create_volume_and_update_readonly_flag: - size: 1 - read_only: true - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - description: "Create volume from image and update read only flag." - scenario: - CinderVolumes.create_volume_and_update_readonly_flag: - size: 1 - read_only: false - image: - name: {{image_name}} - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - - title: CinderVolumes.create_and_list_volume tests - workloads: - - - scenario: - CinderVolumes.create_and_list_volume: - size: 1 - detailed: True - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 1 - users_per_tenant: 1 - - - scenario: - CinderVolumes.create_and_list_volume: - size: - min: 1 - max: 2 - detailed: True - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 1 - users_per_tenant: 1 - - - description: "Create volume from image and list volumes." - scenario: - CinderVolumes.create_and_list_volume: - size: 1 - detailed: True - image: - name: {{image_name}} - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 1 - users_per_tenant: 1 - - - title: CinderVolumes.create_and_get_volume tests - workloads: - - - scenario: - CinderVolumes.create_and_get_volume: - size: 1 - runner: - constant: - times: 4 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - scenario: - CinderVolumes.create_and_get_volume: - size: - min: 1 - max: 2 - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - description: "Create volume from image and get it." 
- scenario: - CinderVolumes.create_and_get_volume: - size: 1 - image: - name: {{image_name}} - runner: - constant: - times: 4 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - - title: CinderVolumes.list_volumes tests - scenario: - CinderVolumes.list_volumes: - detailed: True - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 1 - volumes: - size: 1 - volumes_per_tenant: 2 - - - - title: CinderVolumes.list_types tests - workloads: - {% for s in ("true", "false") %} - - - scenario: - CinderVolumes.list_types: - is_public: {{s}} - runner: - constant: - times: 10 - concurrency: 5 - contexts: - users: - tenants: 2 - users_per_tenant: 3 - {% endfor %} - - - - title: CinderVolumes.create_and_accept_transfer tests - workloads: - - - scenario: - CinderVolumes.create_and_accept_transfer: - size: 1 - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - description: "Create volume from image and accept transfer." - scenario: - CinderVolumes.create_and_accept_transfer: - size: 1 - image: - name: {{image_name}} - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - - title: CinderVolumes.create_and_extend_volume tests - workloads: - - - scenario: - CinderVolumes.create_and_extend_volume: - size: 1 - new_size: 2 - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 1 - users_per_tenant: 1 - - - scenario: - CinderVolumes.create_and_extend_volume: - size: - min: 1 - max: 2 - new_size: - min: 3 - max: 4 - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 1 - users_per_tenant: 1 - - - title: CinderVolumes.create_from_volume_and_delete_volume test - workloads: - - - scenario: - CinderVolumes.create_from_volume_and_delete_volume: - size: 1 - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 1 - users_per_tenant: 1 - volumes: - size: 1 - - - scenario: - CinderVolumes.create_from_volume_and_delete_volume: - size: - min: 1 - max: 2 - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 1 - users_per_tenant: 1 - volumes: - size: 1 - - - title: CinderVolumes.create_and_attach_volume test - scenario: - CinderVolumes.create_and_attach_volume: - size: 1 - image: - name: {{image_name}} - flavor: - name: {{flavor_name}} - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - title: CinderVolumes.create_snapshot_and_attach_volume tests - workloads: - - - scenario: - CinderVolumes.create_snapshot_and_attach_volume: - volume_type: "lvmdriver-1" - size: 1 - image: - name: {{image_name}} - flavor: - name: {{flavor_name}} - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - description: "Create a volume using volume type created in context." 
- scenario: - CinderVolumes.create_snapshot_and_attach_volume: - volume_type: "test" - size: 1 - image: - name: {{image_name}} - flavor: - name: {{flavor_name}} - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - volume_types: - - "test" - - - title: CinderVolumes.create_and_delete_snapshot tests - scenario: - CinderVolumes.create_and_delete_snapshot: - force: false - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - volumes: - size: 1 - - - title: CinderVolumes.create_and_list_snapshots tests - scenario: - CinderVolumes.create_and_list_snapshots: - force: False - detailed: True - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 1 - users_per_tenant: 1 - volumes: - size: 1 - - - - title: CinderVolumes.create_and_upload_volume_to_image tests - workloads: - - - scenario: - CinderVolumes.create_and_upload_volume_to_image: - size: 1 - image: - name: {{image_name}} - runner: - constant: - times: 1 - concurrency: 1 - contexts: - users: - tenants: 1 - users_per_tenant: 1 - api_versions: - glance: - version: 2 - - - description: > - Create a volume using type created by context & upload to image - scenario: - CinderVolumes.create_and_upload_volume_to_image: - size: 1 - volume_type: test - image: - name: {{image_name}} - runner: - constant: - times: 1 - concurrency: 1 - contexts: - users: - tenants: 1 - users_per_tenant: 1 - volume_types: - - test - - - title: CinderVolumes.create_volume_backup tests - workloads: - - - scenario: - CinderVolumes.create_volume_backup: - size: 1 - do_delete: True - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 1 - users_per_tenant: 1 - - - scenario: - CinderVolumes.create_volume_backup: - size: 1 - do_delete: False - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 1 - users_per_tenant: 1 - - - title: CinderVolumeBackups.create_incremental_volume_backup tests - scenario: - CinderVolumeBackups.create_incremental_volume_backup: - size: 1 - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - - title: CinderVolumes.create_and_restore_volume_backup tests - workloads: - - - scenario: - CinderVolumes.create_and_restore_volume_backup: - size: 1 - do_delete: True - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 1 - users_per_tenant: 1 - - - scenario: - CinderVolumes.create_and_restore_volume_backup: - size: 1 - do_delete: True - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 1 - users_per_tenant: 1 - - - title: CinderVolumes.create_and_list_volume_backups tests - workloads: - - - description: "Create volume backup and list, delete backup at the end." - scenario: - CinderVolumes.create_and_list_volume_backups: - size: 1 - detailed: True - do_delete: True - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 1 - users_per_tenant: 1 - - - description: "Create volume backup and list, don't delete backup." 
- scenario: - CinderVolumes.create_and_list_volume_backups: - size: 1 - detailed: True - do_delete: False - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 1 - users_per_tenant: 1 - - - title: CinderVolumes.create_nested_snapshots_and_attach_volume tests - scenario: - CinderVolumes.create_nested_snapshots_and_attach_volume: - size: - min: 1 - max: 1 - nested_level: 2 - image: - name: {{image_name}} - flavor: - name: {{flavor_name}} - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 51 - - - title: CinderVolumes.create_volume_and_clone tests - workloads: - - - scenario: - CinderVolumes.create_volume_and_clone: - size: 1 - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - scenario: - CinderVolumes.create_volume_and_clone: - size: - min: 1 - max: 1 - nested_level: 2 - runner: - constant: - times: 1 - concurrency: 1 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - title: CinderVolumeTypes.create_and_update_volume_type tests - scenario: - CinderVolumeTypes.create_and_update_volume_type: - description: "test" - update_description: "test update" - runner: - constant: - times: 4 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - title: CinderVolumes.create_volume_from_snapshot - scenario: - CinderVolumes.create_volume_from_snapshot: - do_delete: true - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - volumes: - size: 1 - sla: - failure_rate: - max: 51 - - - title: CinderVolumeTypes.create_and_get_volume_type tests - scenario: - CinderVolumeTypes.create_and_get_volume_type: - description: "rally tests creating types" - runner: - constant: - times: 4 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - title: CinderVolumeTypes.create_and_delete_volume_type tests - scenario: - CinderVolumeTypes.create_and_delete_volume_type: - description: "rally tests creating types" - runner: - constant: - times: 4 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - title: CinderVolumeTypes.create_and_delete_encryption_type tests - workloads: - - - scenario: - CinderVolumeTypes.create_and_delete_encryption_type: - provider: "LuksEncryptor" - cipher: "aes-xts-plain64" - key_size: 512 - control_location: "front-end" - runner: - constant: - times: 4 - concurrency: 1 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - volume_types: [ - "test_type1", - "test_type2", - "test_type3", - "test_type4" - ] - - - scenario: - CinderVolumeTypes.create_and_delete_encryption_type: - create_specs: - provider: "LuksEncryptor" - cipher: "aes-xts-plain64" - key_size: 512 - control_location: "front-end" - runner: - constant: - times: 1 - concurrency: 1 - contexts: - users: - tenants: 1 - users_per_tenant: 1 - volume_types: [ - "test_type1", - "test_type2", - "test_type3", - "test_type4" - ] - - - - title: CinderVolumeTypes.create_and_list_volume_types tests - scenario: - CinderVolumeTypes.create_and_list_volume_types: - description: "rally tests creating types" - runner: - constant: - times: 4 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - - title: CinderVolumeTypes.create_volume_type_and_encryption_type tests - workloads: - - - scenario: - CinderVolumeTypes.create_volume_type_and_encryption_type: - description: "rally tests creating types" - provider: "LuksEncryptor" - 
cipher: "aes-xts-plain64" - key_size: 512 - control_location: "front-end" - runner: - constant: - times: 4 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - scenario: - CinderVolumeTypes.create_volume_type_and_encryption_type: - create_specs: - provider: "LuksEncryptor" - cipher: "aes-xts-plain64" - key_size: 512 - control_location: "front-end" - runner: - constant: - times: 1 - concurrency: 1 - contexts: - users: - tenants: 1 - users_per_tenant: 1 - - - - title: CinderVolumeTypes.create_and_list_encryption_type tests - workloads: - - - scenario: - CinderVolumeTypes.create_and_list_encryption_type: - provider: "LuksEncryptor" - cipher: "aes-xts-plain64" - key_size: 512 - control_location: "front-end" - runner: - constant: - times: 4 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - volume_types: [ - "test_type1", - "test_type2", - "test_type3", - "test_type4" - ] - - - scenario: - CinderVolumeTypes.create_and_list_encryption_type: - create_specs: - provider: "LuksEncryptor" - cipher: "aes-xts-plain64" - key_size: 512 - control_location: "front-end" - runner: - constant: - times: 1 - concurrency: 1 - contexts: - users: - tenants: 1 - users_per_tenant: 1 - volume_types: [ - "test_type1", - "test_type2", - "test_type3", - "test_type4" - ] - - - title: CinderVolumeTypes.create_and_set_volume_type_keys tests - scenario: - CinderVolumeTypes.create_and_set_volume_type_keys: - description: "rally tests creating types" - volume_type_key: - volume_backend_name: "LVM_iSCSI" - runner: - constant: - times: 4 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - title: CinderVolumes.list_transfers tests - scenario: - CinderVolumes.list_transfers: - detailed: true - runner: - constant: - times: 3 - concurrency: 2 - contexts: - users: - tenants: 3 - users_per_tenant: 2 - - - - title: CinderQos.create_and_list_qos tests - scenario: - CinderQos.create_and_list_qos: - consumer: "both" - write_iops_sec: "10" - read_iops_sec: "1000" - runner: - constant: - times: 4 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - - title: CinderQos.create_and_get_qos tests - scenario: - CinderQos.create_and_get_qos: - consumer: "both" - write_iops_sec: "10" - read_iops_sec: "1000" - runner: - constant: - times: 4 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - - title: CinderQos.create_qos_associate_and_disassociate_type tests - scenario: - CinderQos.create_qos_associate_and_disassociate_type: - consumer: "both" - write_iops_sec: "10" - read_iops_sec: "1000" - runner: - constant: - times: 2 - concurrency: 1 - contexts: - users: - tenants: 1 - users_per_tenant: 1 - volume_types: [ - "test_type1", - "test_type2" - ] - - - - title: CinderVolumeTypes.create_get_and_delete_encryption_type tests - scenario: - CinderVolumeTypes.create_get_and_delete_encryption_type: - provider: "LuksEncryptor" - cipher: "aes-xts-plain64" - key_size: 512 - control_location: "front-end" - runner: - constant: - times: 4 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - volume_types: [ - "test_type1", - "test_type2", - "test_type3", - "test_type4" - ] - - - title: CinderVolumeTypes.create_and_update_encryption_type tests - scenario: - CinderVolumeTypes.create_and_update_encryption_type: - create_provider: "LuksEncryptor" - create_cipher: "aes-xts-plain64" - create_key_size: 512 - create_control_location: "front-end" - update_provider: "CryptsetupEncryptor" - update_cipher: "aes-xts-plain" - 
update_key_size: 256 - update_control_location: "back-end" - runner: - constant: - times: 4 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - volume_types: [ - "test_type1", - "test_type2", - "test_type3", - "test_type4" - ] - - - - title: CinderQos.create_and_set_qos tests - scenario: - CinderQos.create_and_set_qos: - consumer: "back-end" - write_iops_sec: "10" - read_iops_sec: "1000" - set_consumer: "both" - set_write_iops_sec: "11" - set_read_iops_sec: "1001" - runner: - constant: - times: 5 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - - title: CinderVolumeTypes.create_volume_type_add_and_list_type_access tests - scenario: - CinderVolumeTypes.create_volume_type_add_and_list_type_access: - description: "rally tests creating types" - runner: - constant: - times: 4 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 diff --git a/rally-jobs/extra/autoscaling_group.yaml.template b/rally-jobs/extra/autoscaling_group.yaml.template deleted file mode 100644 index 6c9892b411..0000000000 --- a/rally-jobs/extra/autoscaling_group.yaml.template +++ /dev/null @@ -1,46 +0,0 @@ -heat_template_version: 2013-05-23 - -parameters: - flavor: - type: string - default: m1.tiny - constraints: - - custom_constraint: nova.flavor - image: - type: string - default: cirros-0.3.5-x86_64-disk - constraints: - - custom_constraint: glance.image - scaling_adjustment: - type: number - default: 1 - max_size: - type: number - default: 5 - constraints: - - range: {min: 1} - - -resources: - asg: - type: OS::Heat::AutoScalingGroup - properties: - resource: - type: OS::Nova::Server - properties: - image: { get_param: image } - flavor: { get_param: flavor } - min_size: 1 - desired_capacity: 3 - max_size: { get_param: max_size } - - scaling_policy: - type: OS::Heat::ScalingPolicy - properties: - adjustment_type: change_in_capacity - auto_scaling_group_id: {get_resource: asg} - scaling_adjustment: { get_param: scaling_adjustment } - -outputs: - scaling_url: - value: {get_attr: [scaling_policy, alarm_url]} diff --git a/rally-jobs/extra/autoscaling_policy.yaml.template b/rally-jobs/extra/autoscaling_policy.yaml.template deleted file mode 100644 index a22487e339..0000000000 --- a/rally-jobs/extra/autoscaling_policy.yaml.template +++ /dev/null @@ -1,17 +0,0 @@ -heat_template_version: 2013-05-23 - -resources: - test_group: - type: OS::Heat::AutoScalingGroup - properties: - desired_capacity: 0 - max_size: 0 - min_size: 0 - resource: - type: OS::Heat::RandomString - test_policy: - type: OS::Heat::ScalingPolicy - properties: - adjustment_type: change_in_capacity - auto_scaling_group_id: { get_resource: test_group } - scaling_adjustment: 1 \ No newline at end of file diff --git a/rally-jobs/extra/default.yaml.template b/rally-jobs/extra/default.yaml.template deleted file mode 100644 index eb4f2f2dd8..0000000000 --- a/rally-jobs/extra/default.yaml.template +++ /dev/null @@ -1 +0,0 @@ -heat_template_version: 2014-10-16 \ No newline at end of file diff --git a/rally-jobs/extra/fake-image.img b/rally-jobs/extra/fake-image.img deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally-jobs/extra/mistral_input.json b/rally-jobs/extra/mistral_input.json deleted file mode 100644 index 2d3edf39bb..0000000000 --- a/rally-jobs/extra/mistral_input.json +++ /dev/null @@ -1 +0,0 @@ -{"input1": "value1", "some_json_input": {"a": "b"}} \ No newline at end of file diff --git a/rally-jobs/extra/mistral_params.json b/rally-jobs/extra/mistral_params.json deleted file 
mode 100644 index e75c3f826a..0000000000 --- a/rally-jobs/extra/mistral_params.json +++ /dev/null @@ -1 +0,0 @@ -{"env": {"env_param": "env_param_value"}} \ No newline at end of file diff --git a/rally-jobs/extra/mistral_wb.yaml b/rally-jobs/extra/mistral_wb.yaml deleted file mode 100644 index 98ccdceb93..0000000000 --- a/rally-jobs/extra/mistral_wb.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- -version: "2.0" - -name: wb - -workflows: - wf1: - type: direct - input: - - input1: input1 - - some_json_input: {} - tasks: - hello: - action: std.echo output="Hello" - publish: - result: $ diff --git a/rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter.zip b/rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter.zip deleted file mode 100644 index 690b1285da..0000000000 Binary files a/rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter.zip and /dev/null differ diff --git a/rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/Classes/HelloReporter.yaml b/rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/Classes/HelloReporter.yaml deleted file mode 100644 index 2eca9d0dfa..0000000000 --- a/rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/Classes/HelloReporter.yaml +++ /dev/null @@ -1,25 +0,0 @@ -Namespaces: - =: io.murano.apps - std: io.murano - sys: io.murano.system - - -Name: HelloReporter - -Extends: std:Application - -Properties: - name: - Contract: $.string().notNull() - -Workflow: - initialize: - Body: - - $.environment: $.find(std:Environment).require() - - deploy: - Body: - - If: not $.getAttr(deployed, false) - Then: - - $.environment.reporter.report($this, 'Starting deployment! Hello!') - - $.setAttr(deployed, True) diff --git a/rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/UI/ui.yaml b/rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/UI/ui.yaml deleted file mode 100644 index 2d572f5f72..0000000000 --- a/rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/UI/ui.yaml +++ /dev/null @@ -1,23 +0,0 @@ -Version: 2 - -Application: - ?: - type: io.murano.apps.HelloReporter - name: $.appConfiguration.name - -Forms: - - appConfiguration: - fields: - - name: name - type: string - label: Application Name - description: >- - Enter a desired name for the application. Just A-Z, a-z, 0-9, dash and - underline are allowed - - name: unitNamingPattern - type: string - required: false - hidden: true - widgetMedia: - js: ['muranodashboard/js/support_placeholder.js'] - css: {all: ['muranodashboard/css/support_placeholder.css']} diff --git a/rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/manifest.yaml b/rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/manifest.yaml deleted file mode 100644 index 58075461c6..0000000000 --- a/rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/manifest.yaml +++ /dev/null @@ -1,10 +0,0 @@ -Format: 1.0 -Type: Application -FullName: io.murano.apps.HelloReporter -Name: HelloReporter -Description: | - HelloReporter test app. 
-Author: 'Mirantis, Inc' -Tags: [] -Classes: - io.murano.apps.HelloReporter: HelloReporter.yaml diff --git a/rally-jobs/extra/murano/applications/README.rst b/rally-jobs/extra/murano/applications/README.rst deleted file mode 100644 index 5e006bdea3..0000000000 --- a/rally-jobs/extra/murano/applications/README.rst +++ /dev/null @@ -1,17 +0,0 @@ -Murano applications -=================== - -Files for Murano plugins - -Structure ---------- - -* / directories. Each directory store a simple Murano package - for environment deployment in Murano context. Also there can be other files - needs for application. - - -Useful links ------------- - -* `More about Murano package `_ diff --git a/rally-jobs/extra/random_strings.yaml.template b/rally-jobs/extra/random_strings.yaml.template deleted file mode 100644 index 2dd676c118..0000000000 --- a/rally-jobs/extra/random_strings.yaml.template +++ /dev/null @@ -1,13 +0,0 @@ -heat_template_version: 2014-10-16 - -description: Test template for rally create-update-delete scenario - -resources: - test_string_one: - type: OS::Heat::RandomString - properties: - length: 20 - test_string_two: - type: OS::Heat::RandomString - properties: - length: 20 \ No newline at end of file diff --git a/rally-jobs/extra/resource_group.yaml.template b/rally-jobs/extra/resource_group.yaml.template deleted file mode 100644 index b3f505fa67..0000000000 --- a/rally-jobs/extra/resource_group.yaml.template +++ /dev/null @@ -1,13 +0,0 @@ -heat_template_version: 2014-10-16 - -description: Test template for rally create-update-delete scenario - -resources: - test_group: - type: OS::Heat::ResourceGroup - properties: - count: 2 - resource_def: - type: OS::Heat::RandomString - properties: - length: 20 \ No newline at end of file diff --git a/rally-jobs/extra/resource_group_server_with_volume.yaml.template b/rally-jobs/extra/resource_group_server_with_volume.yaml.template deleted file mode 100644 index fbc8842a71..0000000000 --- a/rally-jobs/extra/resource_group_server_with_volume.yaml.template +++ /dev/null @@ -1,44 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - Test template that creates a resource group with servers and volumes. - The template allows to create a lot of nested stacks with standard - configuration: nova instance, cinder volume attached to that instance - -parameters: - - num_instances: - type: number - description: number of instances that should be created in resource group - constraints: - - range: {min: 1} - instance_image: - type: string - default: cirros-0.3.5-x86_64-disk - instance_volume_size: - type: number - description: Size of volume to attach to instance - default: 1 - constraints: - - range: {min: 1, max: 1024} - instance_flavor: - type: string - description: Type of the instance to be created. - default: m1.tiny - instance_availability_zone: - type: string - description: The Availability Zone to launch the instance. 
- default: nova - -resources: - group_of_volumes: - type: OS::Heat::ResourceGroup - properties: - count: {get_param: num_instances} - resource_def: - type: /home/jenkins/.rally/extra/server_with_volume.yaml.template - properties: - image: {get_param: instance_image} - volume_size: {get_param: instance_volume_size} - flavor: {get_param: instance_flavor} - availability_zone: {get_param: instance_availability_zone} diff --git a/rally-jobs/extra/resource_group_with_constraint.yaml.template b/rally-jobs/extra/resource_group_with_constraint.yaml.template deleted file mode 100644 index 6eca4bb44a..0000000000 --- a/rally-jobs/extra/resource_group_with_constraint.yaml.template +++ /dev/null @@ -1,21 +0,0 @@ -heat_template_version: 2013-05-23 - -description: Template for testing caching. - -parameters: - count: - type: number - default: 40 - delay: - type: number - default: 0.3 - -resources: - rg: - type: OS::Heat::ResourceGroup - properties: - count: {get_param: count} - resource_def: - type: OS::Heat::TestResource - properties: - constraint_prop_secs: {get_param: delay} diff --git a/rally-jobs/extra/resource_group_with_outputs.yaml.template b/rally-jobs/extra/resource_group_with_outputs.yaml.template deleted file mode 100644 index f47d03ccc1..0000000000 --- a/rally-jobs/extra/resource_group_with_outputs.yaml.template +++ /dev/null @@ -1,37 +0,0 @@ -heat_template_version: 2013-05-23 -parameters: - attr_wait_secs: - type: number - default: 0.5 - -resources: - rg: - type: OS::Heat::ResourceGroup - properties: - count: 10 - resource_def: - type: OS::Heat::TestResource - properties: - attr_wait_secs: {get_param: attr_wait_secs} - -outputs: - val1: - value: {get_attr: [rg, resource.0.output]} - val2: - value: {get_attr: [rg, resource.1.output]} - val3: - value: {get_attr: [rg, resource.2.output]} - val4: - value: {get_attr: [rg, resource.3.output]} - val5: - value: {get_attr: [rg, resource.4.output]} - val6: - value: {get_attr: [rg, resource.5.output]} - val7: - value: {get_attr: [rg, resource.6.output]} - val8: - value: {get_attr: [rg, resource.7.output]} - val9: - value: {get_attr: [rg, resource.8.output]} - val10: - value: {get_attr: [rg, resource.9.output]} \ No newline at end of file diff --git a/rally-jobs/extra/server_with_ports.yaml.template b/rally-jobs/extra/server_with_ports.yaml.template deleted file mode 100644 index 0e344fc069..0000000000 --- a/rally-jobs/extra/server_with_ports.yaml.template +++ /dev/null @@ -1,64 +0,0 @@ -heat_template_version: 2013-05-23 - -parameters: - # set all correct defaults for parameters before launch test - public_net: - type: string - default: public - image: - type: string - default: cirros-0.3.5-x86_64-disk - flavor: - type: string - default: m1.tiny - cidr: - type: string - default: 11.11.11.0/24 - -resources: - server: - type: OS::Nova::Server - properties: - image: {get_param: image} - flavor: {get_param: flavor} - networks: - - port: { get_resource: server_port } - - router: - type: OS::Neutron::Router - properties: - external_gateway_info: - network: {get_param: public_net} - - router_interface: - type: OS::Neutron::RouterInterface - properties: - router_id: { get_resource: router } - subnet_id: { get_resource: private_subnet } - - private_net: - type: OS::Neutron::Net - - private_subnet: - type: OS::Neutron::Subnet - properties: - network: { get_resource: private_net } - cidr: {get_param: cidr} - - port_security_group: - type: OS::Neutron::SecurityGroup - properties: - name: default_port_security_group - description: > - Default security group assigned 
to port. The neutron default group is not - used because neutron creates several groups with the same name=default and - nova cannot chooses which one should it use. - - server_port: - type: OS::Neutron::Port - properties: - network: {get_resource: private_net} - fixed_ips: - - subnet: { get_resource: private_subnet } - security_groups: - - { get_resource: port_security_group } diff --git a/rally-jobs/extra/server_with_volume.yaml.template b/rally-jobs/extra/server_with_volume.yaml.template deleted file mode 100644 index 6e65cec720..0000000000 --- a/rally-jobs/extra/server_with_volume.yaml.template +++ /dev/null @@ -1,39 +0,0 @@ -heat_template_version: 2013-05-23 - -parameters: - # set all correct defaults for parameters before launch test - image: - type: string - default: cirros-0.3.5-x86_64-disk - flavor: - type: string - default: m1.tiny - availability_zone: - type: string - description: The Availability Zone to launch the instance. - default: nova - volume_size: - type: number - description: Size of the volume to be created. - default: 1 - constraints: - - range: { min: 1, max: 1024 } - description: must be between 1 and 1024 Gb. - -resources: - server: - type: OS::Nova::Server - properties: - image: {get_param: image} - flavor: {get_param: flavor} - cinder_volume: - type: OS::Cinder::Volume - properties: - size: { get_param: volume_size } - availability_zone: { get_param: availability_zone } - volume_attachment: - type: OS::Cinder::VolumeAttachment - properties: - volume_id: { get_resource: cinder_volume } - instance_uuid: { get_resource: server} - mountpoint: /dev/vdc diff --git a/rally-jobs/extra/updated_autoscaling_policy_inplace.yaml.template b/rally-jobs/extra/updated_autoscaling_policy_inplace.yaml.template deleted file mode 100644 index cf34879ca7..0000000000 --- a/rally-jobs/extra/updated_autoscaling_policy_inplace.yaml.template +++ /dev/null @@ -1,23 +0,0 @@ -heat_template_version: 2013-05-23 - -description: > - Test template for create-update-delete-stack scenario in rally. - The template updates resource parameters without resource re-creation(replacement) - in the stack defined by autoscaling_policy.yaml.template. It allows to measure - performance of "pure" resource update operation only. - -resources: - test_group: - type: OS::Heat::AutoScalingGroup - properties: - desired_capacity: 0 - max_size: 0 - min_size: 0 - resource: - type: OS::Heat::RandomString - test_policy: - type: OS::Heat::ScalingPolicy - properties: - adjustment_type: change_in_capacity - auto_scaling_group_id: { get_resource: test_group } - scaling_adjustment: -1 \ No newline at end of file diff --git a/rally-jobs/extra/updated_random_strings_add.yaml.template b/rally-jobs/extra/updated_random_strings_add.yaml.template deleted file mode 100644 index e06d42e012..0000000000 --- a/rally-jobs/extra/updated_random_strings_add.yaml.template +++ /dev/null @@ -1,19 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - Test template for create-update-delete-stack scenario in rally. - The template updates the stack defined by random_strings.yaml.template with additional resource. 
- -resources: - test_string_one: - type: OS::Heat::RandomString - properties: - length: 20 - test_string_two: - type: OS::Heat::RandomString - properties: - length: 20 - test_string_three: - type: OS::Heat::RandomString - properties: - length: 20 \ No newline at end of file diff --git a/rally-jobs/extra/updated_random_strings_delete.yaml.template b/rally-jobs/extra/updated_random_strings_delete.yaml.template deleted file mode 100644 index d02593e3b8..0000000000 --- a/rally-jobs/extra/updated_random_strings_delete.yaml.template +++ /dev/null @@ -1,11 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - Test template for create-update-delete-stack scenario in rally. - The template deletes one resource from the stack defined by random_strings.yaml.template. - -resources: - test_string_one: - type: OS::Heat::RandomString - properties: - length: 20 \ No newline at end of file diff --git a/rally-jobs/extra/updated_random_strings_replace.yaml.template b/rally-jobs/extra/updated_random_strings_replace.yaml.template deleted file mode 100644 index 46d8bff4cb..0000000000 --- a/rally-jobs/extra/updated_random_strings_replace.yaml.template +++ /dev/null @@ -1,19 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - Test template for create-update-delete-stack scenario in rally. - The template deletes one resource from the stack defined by - random_strings.yaml.template and re-creates it with the updated parameters - (so-called update-replace). That happens because some parameters cannot be - changed without resource re-creation. The template allows to measure performance - of update-replace operation. - -resources: - test_string_one: - type: OS::Heat::RandomString - properties: - length: 20 - test_string_two: - type: OS::Heat::RandomString - properties: - length: 40 \ No newline at end of file diff --git a/rally-jobs/extra/updated_resource_group_increase.yaml.template b/rally-jobs/extra/updated_resource_group_increase.yaml.template deleted file mode 100644 index 891074ebc3..0000000000 --- a/rally-jobs/extra/updated_resource_group_increase.yaml.template +++ /dev/null @@ -1,16 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - Test template for create-update-delete-stack scenario in rally. - The template updates one resource from the stack defined by resource_group.yaml.template - and adds children resources to that resource. - -resources: - test_group: - type: OS::Heat::ResourceGroup - properties: - count: 3 - resource_def: - type: OS::Heat::RandomString - properties: - length: 20 \ No newline at end of file diff --git a/rally-jobs/extra/updated_resource_group_reduce.yaml.template b/rally-jobs/extra/updated_resource_group_reduce.yaml.template deleted file mode 100644 index b4d1d1730a..0000000000 --- a/rally-jobs/extra/updated_resource_group_reduce.yaml.template +++ /dev/null @@ -1,16 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - Test template for create-update-delete-stack scenario in rally. - The template updates one resource from the stack defined by resource_group.yaml.template - and deletes children resources from that resource. 
- -resources: - test_group: - type: OS::Heat::ResourceGroup - properties: - count: 1 - resource_def: - type: OS::Heat::RandomString - properties: - length: 20 \ No newline at end of file diff --git a/rally-jobs/extra/workload/wordpress_heat_template.yaml b/rally-jobs/extra/workload/wordpress_heat_template.yaml deleted file mode 100644 index 9cdb3e38ba..0000000000 --- a/rally-jobs/extra/workload/wordpress_heat_template.yaml +++ /dev/null @@ -1,219 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - Heat WordPress template to support F23, using only Heat OpenStack-native - resource types, and without the requirement for heat-cfntools in the image. - WordPress is web software you can use to create a beautiful website or blog. - This template installs a single-instance WordPress deployment using a local - MySQL database to store the data. - -parameters: - - wp_instances_count: - type: number - default: 1 - - timeout: - type: number - description: Timeout for WaitCondition, seconds - default: 1000 - - router_id: - type: string - description: ID of the router - default: b9135c24-d998-4e2f-b0aa-2b0a40c21ae5 - - network_id: - type: string - description: ID of the network to allocate floating IP from - default: 4eabc459-0096-4479-b105-67ec0cff18cb - - key_name: - type: string - description : Name of a KeyPair to enable SSH access to the instance - default: nova-kp - - wp_instance_type: - type: string - description: Instance type for WordPress server - default: m1.small - - wp_image: - type: string - description: > - Name or ID of the image to use for the WordPress server. - Recommended value is fedora-23.x86_64; - http://cloud.fedoraproject.org/fedora-23.x86_64.qcow2. - default: fedora-23.x86_64 - - image: - type: string - description: > - Name or ID of the image to use for the gate-node. - default: fedora-23.x86_64 - - instance_type: - type: string - description: Instance type for gate-node. 
- default: m1.small - - - db_name: - type: string - description: WordPress database name - default: wordpress - constraints: - - length: { min: 1, max: 64 } - description: db_name must be between 1 and 64 characters - - allowed_pattern: '[a-zA-Z][a-zA-Z0-9]*' - description: > - db_name must begin with a letter and contain only alphanumeric - characters - db_username: - type: string - description: The WordPress database admin account username - default: admin - hidden: true - constraints: - - length: { min: 1, max: 16 } - description: db_username must be between 1 and 16 characters - - allowed_pattern: '[a-zA-Z][a-zA-Z0-9]*' - description: > - db_username must begin with a letter and contain only alphanumeric - characters - db_password: - type: string - description: The WordPress database admin account password - default: admin - hidden: true - constraints: - - length: { min: 1, max: 41 } - description: db_password must be between 1 and 41 characters - - allowed_pattern: '[a-zA-Z0-9]*' - description: db_password must contain only alphanumeric characters - db_root_password: - type: string - description: Root password for MySQL - default: admin - hidden: true - constraints: - - length: { min: 1, max: 41 } - description: db_root_password must be between 1 and 41 characters - - allowed_pattern: '[a-zA-Z0-9]*' - description: db_root_password must contain only alphanumeric characters - -resources: - wordpress_instances: - type: OS::Heat::ResourceGroup - properties: - count: {get_param: wp_instances_count} - resource_def: - type: wp-instances.yaml - properties: - name: wp_%index% - image: { get_param: wp_image } - flavor: { get_param: wp_instance_type } - key_name: { get_param: key_name } - db_root_password: { get_param: db_root_password } - db_name: { get_param: db_name } - db_username: { get_param: db_username } - db_password: { get_param: db_password } - wc_notify: { get_attr: ['wait_handle', 'curl_cli'] } - subnet: {get_resource: subnet} - network: {get_resource: network} - security_group: {get_resource: security_group} - - gate_instance: - type: OS::Nova::Server - properties: - image: { get_param: image } - flavor: { get_param: instance_type } - key_name: { get_param: key_name } - networks: - - port: {get_resource: port_gate} - user_data_format: RAW - user_data: | - #cloud-config - packages: - - python - - siege - - httpd-tools - - security_group: - type: OS::Neutron::SecurityGroup - properties: - rules: - - port_range_max: null - port_range_min: null - protocol: icmp - remote_ip_prefix: 0.0.0.0/0 - - port_range_max: 80 - port_range_min: 80 - protocol: tcp - remote_ip_prefix: 0.0.0.0/0 - - port_range_max: 443 - port_range_min: 443 - protocol: tcp - remote_ip_prefix: 0.0.0.0/0 - - port_range_max: 22 - port_range_min: 22 - protocol: tcp - remote_ip_prefix: 0.0.0.0/0 - - network: - type: OS::Neutron::Net - properties: - name: wordpress-network - - subnet: - type: OS::Neutron::Subnet - properties: - cidr: 10.0.0.1/24 - dns_nameservers: [8.8.8.8] - ip_version: 4 - network: {get_resource: network} - - port_gate: - type: OS::Neutron::Port - properties: - fixed_ips: - - subnet: {get_resource: subnet} - network: {get_resource: network} - replacement_policy: AUTO - security_groups: - - {get_resource: security_group} - - floating_ip: - type: OS::Neutron::FloatingIP - properties: - port_id: {get_resource: port_gate} - floating_network: {get_param: network_id} - - router_interface: - type: OS::Neutron::RouterInterface - properties: - router_id: {get_param: router_id} - subnet: {get_resource: subnet} - - 
wait_condition: - type: OS::Heat::WaitCondition - properties: - handle: {get_resource: wait_handle} - count: {get_param: wp_instances_count} - timeout: {get_param: timeout} - - wait_handle: - type: OS::Heat::WaitConditionHandle - -outputs: - curl_cli: - value: { get_attr: ['wait_handle', 'curl_cli'] } - - wp_nodes: - value: { get_attr: ['wordpress_instances', 'attributes', 'ip'] } - - gate_node: - value: { get_attr: ['floating_ip', 'floating_ip_address'] } - - net_name: - value: { get_attr: ['network', 'name'] } diff --git a/rally-jobs/extra/workload/wp-instances.yaml b/rally-jobs/extra/workload/wp-instances.yaml deleted file mode 100644 index 9a04d42ac1..0000000000 --- a/rally-jobs/extra/workload/wp-instances.yaml +++ /dev/null @@ -1,82 +0,0 @@ -heat_template_version: 2014-10-16 - -parameters: - name: { type: string } - wc_notify: { type: string } - subnet: { type: string } - network: { type: string } - security_group: { type: string } - key_name: { type: string } - flavor: { type: string } - image: { type: string } - db_name: { type: string } - db_username: { type: string } - db_password: { type: string } - db_root_password: { type: string } - -resources: - wordpress_instance: - type: OS::Nova::Server - properties: - name: { get_param: name } - image: { get_param: image } - flavor: { get_param: flavor } - key_name: { get_param: key_name } - networks: - - port: {get_resource: port} - user_data_format: RAW - user_data: - str_replace: - template: | - #!/bin/bash -v - sudo yum -y install mariadb mariadb-server httpd wordpress curl - sudo touch /var/log/mariadb/mariadb.log - sudo chown mysql.mysql /var/log/mariadb/mariadb.log - sudo systemctl start mariadb.service - # Setup MySQL root password and create a user - sudo mysqladmin -u root password db_rootpassword - cat << EOF | mysql -u root --password=db_rootpassword - CREATE DATABASE db_name; - GRANT ALL PRIVILEGES ON db_name.* TO "db_user"@"localhost" - IDENTIFIED BY "db_password"; - FLUSH PRIVILEGES; - EXIT - EOF - sudo sed -i "/Deny from All/d" /etc/httpd/conf.d/wordpress.conf - sudo sed -i "s/Require local/Require all granted/" /etc/httpd/conf.d/wordpress.conf - sudo sed -i s/database_name_here/db_name/ /etc/wordpress/wp-config.php - sudo sed -i s/username_here/db_user/ /etc/wordpress/wp-config.php - sudo sed -i s/password_here/db_password/ /etc/wordpress/wp-config.php - sudo systemctl start httpd.service - IP=$(ip r get 8.8.8.8 | grep src | awk '{print $7}') - curl --data 'user_name=admin&password=123&password2=123&admin_email=asd@asd.com' http://$IP/wordpress/wp-admin/install.php?step=2 - mkfifo /tmp/data - (for i in $(seq 1000); do - echo -n "1,$i,$i,page," - head -c 100000 /dev/urandom | base64 -w 0 - echo - done - ) > /tmp/data & - mysql -u root --password=db_rootpassword wordpress -e 'LOAD DATA LOCAL INFILE "/tmp/data" INTO TABLE wp_posts FIELDS TERMINATED BY "," (post_author,post_title,post_name,post_type,post_content);' - sudo sh -c 'echo "172.16.0.6 mos80-ssl.fuel.local" >> /etc/hosts' - wc_notify --insecure --data-binary '{"status": "SUCCESS"}' - params: - db_rootpassword: { get_param: db_root_password } - db_name: { get_param: db_name } - db_user: { get_param: db_username } - db_password: { get_param: db_password } - wc_notify: { get_param: wc_notify } - - port: - type: OS::Neutron::Port - properties: - fixed_ips: - - subnet: {get_param: subnet} - network: {get_param: network} - replacement_policy: AUTO - security_groups: - - {get_param: security_group} - -outputs: - ip: - value: { get_attr: ['wordpress_instance', 'networks'] } 
diff --git a/rally-jobs/heat.yaml b/rally-jobs/heat.yaml deleted file mode 100644 index 1435562591..0000000000 --- a/rally-jobs/heat.yaml +++ /dev/null @@ -1,297 +0,0 @@ ---- - version: 2 - title: Task for gate-rally-dsvm-rally-heat-nv job - description: > - This task contains various scenarios for testing heat plugins - subtasks: - - - title: HeatStacks.create_and_list_stack tests - scenario: - HeatStacks.create_and_list_stack: - template_path: "~/.rally/extra/default.yaml.template" - runner: - constant: - times: 6 - concurrency: 3 - contexts: - users: - tenants: 1 - users_per_tenant: 1 - - - title: HeatStacks.create_and_delete_stack tests - workloads: - - - scenario: - HeatStacks.create_and_delete_stack: - template_path: "~/.rally/extra/default.yaml.template" - runner: - constant: - times: 6 - concurrency: 3 - contexts: - users: - tenants: 2 - users_per_tenant: 3 - - - scenario: - HeatStacks.create_and_delete_stack: - template_path: "~/.rally/extra/server_with_volume.yaml.template" - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - scenario: - HeatStacks.create_and_delete_stack: - template_path: "~/.rally/extra/resource_group_server_with_volume.yaml.template" - parameters: - num_instances: 2 - files: ["~/.rally/extra/server_with_volume.yaml.template"] - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 1 - - - scenario: - HeatStacks.create_and_delete_stack: - template_path: "~/.rally/extra/resource_group_with_constraint.yaml.template" - runner: - constant: - times: 6 - concurrency: 3 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - - title: HeatStacks.create_check_delete_stack tests - scenario: - HeatStacks.create_check_delete_stack: - template_path: "~/.rally/extra/random_strings.yaml.template" - runner: - constant: - times: 6 - concurrency: 3 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - title: HeatStacks.create_update_delete_stack tests - workloads: - - - scenario: - HeatStacks.create_update_delete_stack: - template_path: "~/.rally/extra/random_strings.yaml.template" - updated_template_path: "~/.rally/extra/updated_random_strings_add.yaml.template" - runner: - constant: - times: 6 - concurrency: 3 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - scenario: - HeatStacks.create_update_delete_stack: - template_path: "~/.rally/extra/random_strings.yaml.template" - updated_template_path: "~/.rally/extra/updated_random_strings_delete.yaml.template" - runner: - constant: - times: 6 - concurrency: 3 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - scenario: - HeatStacks.create_update_delete_stack: - template_path: "~/.rally/extra/random_strings.yaml.template" - updated_template_path: "~/.rally/extra/updated_random_strings_replace.yaml.template" - runner: - constant: - times: 6 - concurrency: 3 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - scenario: - HeatStacks.create_update_delete_stack: - template_path: "~/.rally/extra/autoscaling_policy.yaml.template" - updated_template_path: "~/.rally/extra/updated_autoscaling_policy_inplace.yaml.template" - runner: - constant: - times: 6 - concurrency: 3 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - scenario: - HeatStacks.create_update_delete_stack: - template_path: "~/.rally/extra/resource_group.yaml.template" - updated_template_path: "~/.rally/extra/updated_resource_group_increase.yaml.template" - runner: - constant: - times: 6 - concurrency: 3 - contexts: 
- users: - tenants: 2 - users_per_tenant: 3 - - - scenario: - HeatStacks.create_update_delete_stack: - template_path: "~/.rally/extra/resource_group.yaml.template" - updated_template_path: "~/.rally/extra/updated_resource_group_reduce.yaml.template" - runner: - constant: - times: 6 - concurrency: 3 - contexts: - users: - tenants: 2 - users_per_tenant: 3 - - - - title: HeatStacks.create_suspend_resume_delete_stack tests - scenario: - HeatStacks.create_suspend_resume_delete_stack: - template_path: "~/.rally/extra/random_strings.yaml.template" - runner: - constant: - times: 6 - concurrency: 3 - contexts: - users: - tenants: 2 - users_per_tenant: 3 - - - title: HeatStacks.create_snapshot_restore_delete_stack tests - scenario: - HeatStacks.create_snapshot_restore_delete_stack: - template_path: "~/.rally/extra/random_strings.yaml.template" - runner: - constant: - times: 6 - concurrency: 3 - contexts: - users: - tenants: 2 - users_per_tenant: 3 - - - title: HeatStacks.create_stack_and_scale tests - workloads: - - - scenario: - HeatStacks.create_stack_and_scale: - template_path: "~/.rally/extra/autoscaling_group.yaml.template" - output_key: "scaling_url" - delta: 1 - parameters: - scaling_adjustment: 1 - runner: - constant: - times: 2 - concurrency: 1 - contexts: - users: - tenants: 2 - users_per_tenant: 1 - - - scenario: - HeatStacks.create_stack_and_scale: - template_path: "~/.rally/extra/autoscaling_group.yaml.template" - output_key: "scaling_url" - delta: -1 - parameters: - scaling_adjustment: -1 - runner: - constant: - times: 2 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 1 - - - title: HeatStacks.create_stack_and_list_output tests - scenario: - HeatStacks.create_stack_and_list_output: - template_path: "~/.rally/extra/resource_group_with_outputs.yaml.template" - runner: - constant: - times: 4 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - title: HeatStacks.create_stack_and_list_output_via_API tests - scenario: - HeatStacks.create_stack_and_list_output_via_API: - template_path: "~/.rally/extra/resource_group_with_outputs.yaml.template" - runner: - constant: - times: 4 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - title: HeatStacks.create_stack_and_show_output tests - scenario: - HeatStacks.create_stack_and_show_output: - template_path: "~/.rally/extra/resource_group_with_outputs.yaml.template" - output_key: "val1" - runner: - constant: - times: 4 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - title: HeatStacks.create_stack_and_show_output_via_API tests - scenario: - HeatStacks.create_stack_and_show_output_via_API: - template_path: "~/.rally/extra/resource_group_with_outputs.yaml.template" - output_key: "val1" - runner: - constant: - times: 4 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - title: Authenticate.validate_heat tests - scenario: - Authenticate.validate_heat: - repetitions: 2 - runner: - constant: - times: 10 - concurrency: 5 - contexts: - users: - tenants: 3 - users_per_tenant: 5 diff --git a/rally-jobs/rally-designate.yaml b/rally-jobs/rally-designate.yaml deleted file mode 100644 index 131b95dc78..0000000000 --- a/rally-jobs/rally-designate.yaml +++ /dev/null @@ -1,182 +0,0 @@ ---- - version: 2 - title: Task for gate-rally-dsvm-designate-rally-pdns4-ubuntu-xenial-nv job - description: > - This task contains various scenarios for testing designate plugins - subtasks: - - - title: DesignateBasic.create_and_delete_domain tests - 
scenario: - DesignateBasic.create_and_delete_domain: {} - runner: - constant: - times: 4 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - title: DesignateBasic.create_and_update_domain tests - scenario: - DesignateBasic.create_and_update_domain: {} - runner: - constant: - times: 4 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - title: DesignateBasic.create_and_delete_records tests - scenario: - DesignateBasic.create_and_delete_records: - records_per_domain: 5 - runner: - constant: - times: 4 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - title: DesignateBasic.create_and_list_domains tests - scenario: - DesignateBasic.create_and_list_domains: {} - runner: - constant: - times: 4 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - title: DesignateBasic.create_and_list_records tests - scenario: - DesignateBasic.create_and_list_records: - records_per_domain: 5 - runner: - constant: - times: 4 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - title: DesignateBasic.list_domains tests - scenario: - DesignateBasic.list_domains: {} - runner: - constant: - times: 3 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - title: DesignateBasic.create_and_list_servers tests - scenario: - DesignateBasic.create_and_list_servers: {} - runner: - constant: - times: 4 - concurrency: 1 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - title: DesignateBasic.create_and_delete_server tests - scenario: - DesignateBasic.create_and_delete_server: {} - runner: - constant: - times: 4 - concurrency: 1 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - title: DesignateBasic.list_servers tests - scenario: - DesignateBasic.list_servers: {} - runner: - constant: - times: 4 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - title: DesignateBasic.create_and_list_zones tests - scenario: - DesignateBasic.create_and_list_zones: {} - runner: - constant: - times: 4 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - title: DesignateBasic.create_and_delete_zone tests - scenario: - DesignateBasic.create_and_delete_zone: {} - runner: - constant: - times: 4 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - title: DesignateBasic.create_and_list_recordsets tests - scenario: - DesignateBasic.create_and_list_recordsets: {} - runner: - constant: - times: 4 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - zones: - zones_per_tenant: 1 - - - title: DesignateBasic.create_and_delete_recordsets tests - scenario: - DesignateBasic.create_and_delete_recordsets: {} - runner: - constant: - times: 4 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - zones: - zones_per_tenant: 1 - - - title: DesignateBasic.list_zones tests - scenario: - DesignateBasic.list_zones: {} - runner: - constant: - times: 4 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - zones: - zones_per_tenant: 10 diff --git a/rally-jobs/rally-ironic.yaml b/rally-jobs/rally-ironic.yaml deleted file mode 100644 index 41852c0347..0000000000 --- a/rally-jobs/rally-ironic.yaml +++ /dev/null @@ -1,36 +0,0 @@ ---- - version: 2 - title: Task for gate-rally-dsvm-ironic-rally-nv job - description: > - This task contains various scenarios for testing ironic plugins - subtasks: - - - title: IronicNodes.create_and_list_node tests - scenario: - 
IronicNodes.create_and_list_node: - driver: "fake" - properties: - capabilities: "boot_option:local" - runner: - constant: - times: 100 - concurrency: 20 - contexts: - users: - tenants: 5 - users_per_tenant: 1 - - - title: IronicNodes.create_and_delete_node tests - scenario: - IronicNodes.create_and_delete_node: - driver: "fake" - properties: - capabilities: "boot_option:local" - runner: - constant: - times: 100 - concurrency: 20 - contexts: - users: - tenants: 5 - users_per_tenant: 1 diff --git a/rally-jobs/rally-keystone-api-v2.yaml b/rally-jobs/rally-keystone-api-v2.yaml deleted file mode 120000 index 66fa5e5a0a..0000000000 --- a/rally-jobs/rally-keystone-api-v2.yaml +++ /dev/null @@ -1 +0,0 @@ -rally.yaml \ No newline at end of file diff --git a/rally-jobs/rally-magnum.yaml b/rally-jobs/rally-magnum.yaml deleted file mode 100644 index cd1322d46d..0000000000 --- a/rally-jobs/rally-magnum.yaml +++ /dev/null @@ -1,78 +0,0 @@ -{% set image = "Fedora-Atomic-26-20170723.0.x86_64" %} ---- - version: 2 - title: Task for gate-rally-dsvm-magnum-rally-nv job - description: > - This task contains various subtasks for testing magnum plugins - subtasks: - - - title: MagnumClusterTemplates.list_cluster_templates tests - workloads: - - - scenario: - MagnumClusterTemplates.list_cluster_templates: {} - runner: - constant: - times: 40 - concurrency: 20 - contexts: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: {{ image }} - flavor_id: "m1.small" - master_flavor_id: "m1.small" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - docker_volume_size: 5 - coe: "kubernetes" - network_driver: "flannel" - docker_storage_driver: "devicemapper" - master_lb_enabled: False - - - scenario: - MagnumClusterTemplates.list_cluster_templates: {} - runner: - constant: - times: 40 - concurrency: 20 - contexts: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: {{ image }} - flavor_id: "m1.small" - master_flavor_id: "m1.small" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - docker_volume_size: 5 - coe: "swarm" - network_driver: "docker" - docker_storage_driver: "devicemapper" - master_lb_enabled: False - - - title: MagnumClusters.create_and_list_clusters tests - scenario: - MagnumClusters.create_and_list_clusters: - node_count: 1 - runner: - constant: - times: 1 - concurrency: 1 - contexts: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: {{ image }} - flavor_id: "m1.small" - master_flavor_id: "m1.small" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - docker_volume_size: 5 - coe: "swarm" - network_driver: "docker" - docker_storage_driver: "devicemapper" - master_lb_enabled: False diff --git a/rally-jobs/rally-manila-no-ss.yaml b/rally-jobs/rally-manila-no-ss.yaml deleted file mode 100644 index c52e8410ec..0000000000 --- a/rally-jobs/rally-manila-no-ss.yaml +++ /dev/null @@ -1,169 +0,0 @@ ---- - version: 2 - title: Task for gate-rally-dsvm-manila-multibackend-no-ss job - description: > - This task contains various subtasks for testing manila plugins - subtasks: - - - title: Test Manila Quotas context - scenario: - Dummy.openstack: {} - runner: - constant: - times: 1 - concurrency: 1 - contexts: - users: - tenants: 1 - users_per_tenant: 1 - quotas: - manila: - shares: -1 - gigabytes: -1 - snapshots: -1 - snapshot_gigabytes: -1 - share_networks: -1 - - - title: ManilaShares.list_shares tests - scenario: - ManilaShares.list_shares: - detailed: True - runner: - constant: - times: 10 - concurrency: 1 - 
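A note on the runner blocks that repeat throughout the job files removed above: a constant runner executes the scenario `times` iterations in total while never keeping more than `concurrency` iterations in flight at once. The sketch below only illustrates that scheduling behaviour; it is not Rally's runner implementation, and `fake_scenario` is a hypothetical stand-in for a scenario plugin call.

```python
from concurrent.futures import ThreadPoolExecutor


def fake_scenario(iteration):
    # Hypothetical stand-in for a scenario plugin call such as
    # IronicNodes.create_and_list_node; it only reports its iteration number.
    return "iteration %d finished" % iteration


def run_constant(times, concurrency):
    # Submit `times` iterations while letting at most `concurrency`
    # of them execute at the same time (the meaning of the YAML fields).
    with ThreadPoolExecutor(max_workers=concurrency) as pool:
        return list(pool.map(fake_scenario, range(times)))


if __name__ == "__main__":
    # Mirrors "times: 100, concurrency: 20" from the Ironic workloads above.
    print(len(run_constant(times=100, concurrency=20)))
```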
contexts: - users: - tenants: 1 - users_per_tenant: 1 - - - title: ManilaShares.create_share_then_allow_and_deny_access tests - scenario: - ManilaShares.create_share_then_allow_and_deny_access: - share_proto: "nfs" - share_type: "dhss_false" - size: 1 - access: "127.0.0.1" - access_type: "ip" - runner: - constant: - times: 2 - concurrency: 2 - contexts: - quotas: - manila: - shares: -1 - gigabytes: -1 - users: - tenants: 2 - users_per_tenant: 1 - - - title: ManilaShares.create_and_delete_share tests - scenario: - ManilaShares.create_and_delete_share: - share_proto: "nfs" - size: 1 - share_type: "dhss_false" - min_sleep: 1 - max_sleep: 2 - runner: - constant: - times: 4 - concurrency: 4 - contexts: - quotas: - manila: - shares: -1 - gigabytes: -1 - users: - tenants: 2 - users_per_tenant: 1 - - - title: ManilaShares.create_and_list_share tests - scenario: - ManilaShares.create_and_list_share: - share_proto: "nfs" - size: 1 - share_type: "dhss_false" - min_sleep: 1 - max_sleep: 2 - runner: - constant: - times: 4 - concurrency: 4 - contexts: - quotas: - manila: - shares: -1 - gigabytes: -1 - users: - tenants: 2 - users_per_tenant: 1 - - - title: ManilaShares.create_and_extend_share tests - scenario: - ManilaShares.create_and_extend_share: - share_proto: "nfs" - size: 1 - share_type: "dhss_false" - new_size: 2 - runner: - constant: - times: 4 - concurrency: 4 - contexts: - quotas: - manila: - shares: -1 - gigabytes: -1 - users: - tenants: 2 - users_per_tenant: 1 - - - title: ManilaShares.create_and_shrink_share tests - scenario: - ManilaShares.create_and_shrink_share: - share_proto: "nfs" - size: 2 - share_type: "dhss_false" - new_size: 1 - runner: - constant: - times: 4 - concurrency: 4 - contexts: - quotas: - manila: - shares: -1 - gigabytes: -1 - users: - tenants: 2 - users_per_tenant: 1 - - - title: ManilaShares.set_and_delete_metadata tests - scenario: - ManilaShares.set_and_delete_metadata: - sets: 1 - set_size: 3 - delete_size: 3 - key_min_length: 1 - key_max_length: 256 - value_min_length: 1 - value_max_length: 1024 - runner: - constant: - times: 10 - concurrency: 10 - contexts: - quotas: - manila: - shares: -1 - gigabytes: -1 - users: - tenants: 1 - users_per_tenant: 1 - manila_shares: - shares_per_tenant: 1 - share_proto: "NFS" - size: 1 - share_type: "dhss_false" diff --git a/rally-jobs/rally-manila.yaml b/rally-jobs/rally-manila.yaml deleted file mode 100644 index 19ba4b8570..0000000000 --- a/rally-jobs/rally-manila.yaml +++ /dev/null @@ -1,281 +0,0 @@ ---- - version: 2 - title: Task for gate-rally-dsvm-manila-multibackend-no-ss job - description: > - This task contains various subtasks for testing manila plugins - subtasks: - - - title: Test Manila Quotas context - scenario: - Dummy.openstack: {} - runner: - constant: - times: 1 - concurrency: 1 - contexts: - users: - tenants: 1 - users_per_tenant: 1 - quotas: - manila: - shares: -1 - gigabytes: -1 - snapshots: -1 - snapshot_gigabytes: -1 - share_networks: -1 - - - title: ManilaShares.list_shares tests - scenario: - ManilaShares.list_shares: - detailed: True - runner: - constant: - times: 12 - concurrency: 4 - contexts: - users: - tenants: 3 - users_per_tenant: 4 - user_choice_method: "round_robin" - - - title: ManilaShares.create_and_extend_share tests - scenario: - ManilaShares.create_and_extend_share: - share_proto: "nfs" - size: 1 - new_size: 2 - share_type: "dhss_true" - runner: - constant: - times: 4 - concurrency: 4 - contexts: - quotas: - manila: - shares: -1 - gigabytes: -1 - share_networks: -1 - users: - tenants: 2 - 
users_per_tenant: 1 - user_choice_method: "round_robin" - manila_share_networks: - use_share_networks: True - - - title: ManilaShares.create_and_shrink_share tests - scenario: - ManilaShares.create_and_shrink_share: - share_proto: "nfs" - size: 2 - new_size: 1 - share_type: "dhss_true" - runner: - constant: - times: 4 - concurrency: 4 - contexts: - quotas: - manila: - shares: -1 - gigabytes: -1 - share_networks: -1 - users: - tenants: 2 - users_per_tenant: 1 - user_choice_method: "round_robin" - manila_share_networks: - use_share_networks: True - - - title: ManilaShares.create_share_then_allow_and_deny_access tests - scenario: - ManilaShares.create_share_then_allow_and_deny_access: - share_proto: "nfs" - size: 1 - share_type: "dhss_true" - access: "127.0.0.1" - access_type: "ip" - runner: - constant: - times: 4 - concurrency: 4 - contexts: - quotas: - manila: - shares: -1 - gigabytes: -1 - share_networks: -1 - users: - tenants: 2 - users_per_tenant: 1 - user_choice_method: "round_robin" - manila_share_networks: - use_share_networks: True - - - title: ManilaShares.create_and_delete_share tests - scenario: - ManilaShares.create_and_delete_share: - share_proto: "nfs" - size: 1 - share_type: "dhss_true" - min_sleep: 1 - max_sleep: 2 - runner: - constant: - times: 4 - concurrency: 4 - contexts: - quotas: - manila: - shares: -1 - gigabytes: -1 - share_networks: -1 - users: - tenants: 2 - users_per_tenant: 1 - user_choice_method: "round_robin" - manila_share_networks: - use_share_networks: True - - - title: ManilaShares.create_and_list_share tests - scenario: - ManilaShares.create_and_list_share: - share_proto: "nfs" - size: 1 - share_type: "dhss_true" - min_sleep: 1 - max_sleep: 2 - runner: - constant: - times: 4 - concurrency: 4 - contexts: - quotas: - manila: - shares: -1 - gigabytes: -1 - share_networks: -1 - users: - tenants: 2 - users_per_tenant: 1 - user_choice_method: "round_robin" - manila_share_networks: - use_share_networks: True - - - title: ManilaShares.create_share_network_and_delete tests - scenario: - ManilaShares.create_share_network_and_delete: - name: "rally" - runner: - constant: - times: 10 - concurrency: 10 - contexts: - quotas: - manila: - share_networks: -1 - users: - tenants: 2 - users_per_tenant: 1 - - - title: ManilaShares.create_share_network_and_list tests - scenario: - ManilaShares.create_share_network_and_list: - name: "rally" - detailed: True - search_opts: - name: "rally" - runner: - constant: - times: 10 - concurrency: 10 - contexts: - quotas: - manila: - share_networks: -1 - users: - tenants: 2 - users_per_tenant: 1 - - - title: ManilaShares.list_share_servers tests - scenario: - ManilaShares.list_share_servers: - search_opts: {} - runner: - constant: - times: 10 - concurrency: 10 - - - title: ManilaShares.create_security_service_and_delete tests - workloads: - {% for s in ("ldap", "kerberos", "active_directory") %} - - - scenario: - ManilaShares.create_security_service_and_delete: - security_service_type: {{s}} - dns_ip: "fake_dns_ip" - server: "fake-server" - domain: "fake_domain" - user: "fake_user" - password: "fake_password" - name: "fake_name" - description: "fake_description" - runner: - constant: - times: 10 - concurrency: 10 - contexts: - users: - tenants: 1 - users_per_tenant: 1 - {% endfor %} - - - title: ManilaShares.attach_security_service_to_share_network tests - workloads: - {% for s in ("ldap", "kerberos", "active_directory") %} - - - scenario: - ManilaShares.attach_security_service_to_share_network: - security_service_type: {{s}} - runner: - 
constant: - times: 10 - concurrency: 10 - contexts: - users: - tenants: 1 - users_per_tenant: 1 - quotas: - manila: - share_networks: -1 - {% endfor %} - - - title: ManilaShares.set_and_delete_metadata tests - scenario: - ManilaShares.set_and_delete_metadata: - sets: 1 - set_size: 3 - delete_size: 3 - key_min_length: 1 - key_max_length: 256 - value_min_length: 1 - value_max_length: 1024 - runner: - constant: - times: 10 - concurrency: 10 - contexts: - quotas: - manila: - shares: -1 - gigabytes: -1 - share_networks: -1 - users: - tenants: 1 - users_per_tenant: 1 - manila_share_networks: - use_share_networks: True - manila_shares: - shares_per_tenant: 1 - share_proto: "NFS" - size: 1 - share_type: "dhss_true" diff --git a/rally-jobs/rally-mistral.yaml b/rally-jobs/rally-mistral.yaml deleted file mode 100644 index ee02f8ce96..0000000000 --- a/rally-jobs/rally-mistral.yaml +++ /dev/null @@ -1,97 +0,0 @@ ---- - version: 2 - title: Task for gate-rally-dsvm-mistral-rally-ubuntu-xenial-nv job - description: > - This task contains various subtasks for testing mistral plugins - subtasks: - - - title: MistralWorkbooks.list_workbooks tests - scenario: - MistralWorkbooks.list_workbooks: {} - runner: - constant: - times: 50 - concurrency: 10 - contexts: - users: - tenants: 1 - users_per_tenant: 1 - - - title: MistralWorkbooks.create_workbook tests - workloads: - - - scenario: - MistralWorkbooks.create_workbook: - definition: "~/.rally/extra/mistral_wb.yaml" - runner: - constant: - times: 50 - concurrency: 10 - contexts: - users: - tenants: 1 - users_per_tenant: 1 - - - scenario: - MistralWorkbooks.create_workbook: - definition: "~/.rally/extra/mistral_wb.yaml" - do_delete: true - runner: - constant: - times: 50 - concurrency: 10 - contexts: - users: - tenants: 1 - users_per_tenant: 1 - - - title: MistralExecutions.list_executions tests - scenario: - MistralExecutions.list_executions: {} - runner: - constant: - times: 50 - concurrency: 10 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - title: MistralExecutions.create_execution_from_workbook tests - workloads: - - - description: MistralExecutions.create_execution_from_workbook scenario\ - with delete option - scenario: - MistralExecutions.create_execution_from_workbook: - definition: "~/.rally/extra/mistral_wb.yaml" - workflow_name: "wf1" - params: "~/.rally/extra/mistral_params.json" - wf_input: "~/.rally/extra/mistral_input.json" - do_delete: true - runner: - constant: - times: 50 - concurrency: 10 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - - - description: MistralExecutions.create_execution_from_workbook scenario\ - without delete option - scenario: - MistralExecutions.create_execution_from_workbook: - definition: "~/.rally/extra/mistral_wb.yaml" - workflow_name: "wf1" - params: "~/.rally/extra/mistral_params.json" - wf_input: "~/.rally/extra/mistral_input.json" - do_delete: false - runner: - constant: - times: 50 - concurrency: 10 - contexts: - users: - tenants: 2 - users_per_tenant: 2 diff --git a/rally-jobs/rally-monasca.yaml b/rally-jobs/rally-monasca.yaml deleted file mode 100644 index bc644bbc51..0000000000 --- a/rally-jobs/rally-monasca.yaml +++ /dev/null @@ -1,44 +0,0 @@ ---- - version: 2 - title: Task for gate-rally-dsvm-monasca-rally-ubuntu-xenial-nv job - description: > - This task contains various subtasks for testing Monasca plugins - subtasks: - - - title: MonascaMetrics.list_metrics tests - workloads: - - - scenario: - MonascaMetrics.list_metrics: {} - runner: - constant: - times: 10 - concurrency: 2 - 
contexts: - users: - tenants: 2 - users_per_tenant: 2 - roles: - - "monasca-user" - monasca_metrics: - "dimensions": - "region": "RegionOne" - "service": "identity" - "hostname": "fake_host" - "url": "http://fake_host:5000/v2.0" - "metrics_per_tenant": 10 - - - scenario: - MonascaMetrics.list_metrics: {} - runner: - constant: - times: 10 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - roles: - - "monasca-user" - monasca_metrics: - "metrics_per_tenant": 10 diff --git a/rally-jobs/rally-mos.yaml b/rally-jobs/rally-mos.yaml deleted file mode 100644 index 093246d250..0000000000 --- a/rally-jobs/rally-mos.yaml +++ /dev/null @@ -1,859 +0,0 @@ ---- -{%- set cirros_image_url = "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" %} -{%- set keystone_version = keystone_version|default("v2.0") %} -{% if keystone_version == "v2.0" %} - - SaharaNodeGroupTemplates.create_and_list_node_group_templates: - - - args: - hadoop_version: "{{sahara_hadoop_version}}" - flavor: - name: "m1.small" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - api_versions: - sahara: - service_type: {{sahara_service_type}} - sla: - failure_rate: - max: 0 - - SaharaNodeGroupTemplates.create_delete_node_group_templates: - - - args: - hadoop_version: "{{sahara_hadoop_version}}" - flavor: - name: "m1.small" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - api_versions: - sahara: - service_type: {{sahara_service_type}} - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_and_list_tenants: - - - args: {} - runner: - type: "constant" - times: 1 - concurrency: 1 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_tenant: - - - args: {} - runner: - type: "constant" - times: 1 - concurrency: 1 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_tenant_with_users: - - - args: - users_per_tenant: 10 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - sla: - failure_rate: - max: 0 - -{% endif %} - - KeystoneBasic.create_user: - - - args: {} - runner: - type: "constant" - times: 1 - concurrency: 1 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_delete_user: - - - args: {} - runner: - type: "constant" - times: 1 - concurrency: 1 - sla: - failure_rate: - max: 0 - - KeystoneBasic.create_and_list_users: - - - args: {} - runner: - type: "constant" - times: 1 - concurrency: 1 - sla: - failure_rate: - max: 0 - - HeatStacks.create_and_list_stack: - - - args: - template_path: "~/.rally/extra/default.yaml.template" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - HeatStacks.create_and_delete_stack: - - - args: - template_path: "~/.rally/extra/default.yaml.template" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - Authenticate.keystone: - - - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - Authenticate.validate_cinder: - - - args: - repetitions: 2 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - Authenticate.validate_glance: - - - args: - repetitions: 2 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - 
tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - Authenticate.validate_heat: - - - args: - repetitions: 2 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - Authenticate.validate_nova: - - - args: - repetitions: 2 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - Quotas.cinder_update_and_delete: - - - args: - max_quota: 1024 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - Quotas.cinder_update: - - - args: - max_quota: 1024 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - Quotas.nova_update_and_delete: - - - args: - max_quota: 1024 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - Quotas.nova_update: - - - args: - max_quota: 1024 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - VMTasks.boot_runcommand_delete: - - - args: - flavor: - name: "m1.tiny" - image: - name: "TestVM|cirros.*uec" - floating_network: "{{external_net}}" - use_floating_ip: true - command: - script_file: "~/.rally/extra/instance_test.sh" - interpreter: "/bin/sh" - username: "cirros" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - network: {} - sla: - failure_rate: - max: 0 - - - NovaServers.boot_and_delete_server: - - - args: - flavor: - name: "m1.tiny" - image: - name: "TestVM|cirros.*uec" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - - - args: - auto_assign_nic: true - flavor: - name: "m1.tiny" - image: - name: "TestVM|cirros.*uec" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - network: - start_cidr: "10.2.0.0/24" - networks_per_tenant: 2 - sla: - failure_rate: - max: 0 - - - NovaServers.boot_and_list_server: - - - args: - flavor: - name: "m1.tiny" - image: - name: "TestVM|cirros.*uec" - detailed: True - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - NovaServers.list_servers: - - - args: - detailed: True - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - servers: - flavor: - name: "m1.tiny" - image: - name: "TestVM|cirros.*uec" - servers_per_tenant: 1 - sla: - failure_rate: - max: 0 - - NovaServers.boot_and_bounce_server: - - - args: - flavor: - name: "m1.tiny" - image: - name: "TestVM|cirros.*uec" - actions: - - - hard_reboot: 1 - - - stop_start: 1 - - - rescue_unrescue: 1 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - NovaServers.boot_server: - - - args: - flavor: - name: "^ram64$" - image: - name: "TestVM|cirros.*uec" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - flavors: - - - name: "ram64" - ram: 64 - sla: - failure_rate: - max: 0 - - - args: - flavor: - name: 
"m1.tiny" - image: - name: "TestVM|cirros.*uec" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_list_networks: - - - args: - network_create_args: - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - quotas: - neutron: - network: -1 - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_list_subnets: - - - args: - network_create_args: - subnet_create_args: - subnet_cidr_start: "1.1.0.0/30" - subnets_per_network: 2 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - network: {} - users: - tenants: 1 - users_per_tenant: 1 - quotas: - neutron: - network: -1 - subnet: -1 - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_list_routers: - - - args: - network_create_args: - subnet_create_args: - subnet_cidr_start: "1.1.0.0/30" - subnets_per_network: 2 - router_create_args: - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - network: {} - users: - tenants: 1 - users_per_tenant: 1 - quotas: - neutron: - network: -1 - subnet: -1 - router: -1 - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_list_ports: - - - args: - network_create_args: - port_create_args: - ports_per_network: 4 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - network: {} - users: - tenants: 1 - users_per_tenant: 1 - quotas: - neutron: - network: -1 - subnet: -1 - router: -1 - port: -1 - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_update_networks: - - - args: - network_create_args: {} - network_update_args: - admin_state_up: False - name: "_updated" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - quotas: - neutron: - network: -1 - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_update_subnets: - - - args: - network_create_args: {} - subnet_create_args: {} - subnet_cidr_start: "1.4.0.0/16" - subnets_per_network: 2 - subnet_update_args: - enable_dhcp: False - name: "_subnet_updated" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - network: {} - users: - tenants: 5 - users_per_tenant: 5 - quotas: - neutron: - network: -1 - subnet: -1 - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_update_routers: - - - args: - network_create_args: {} - subnet_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - subnets_per_network: 2 - router_create_args: {} - router_update_args: - admin_state_up: False - name: "_router_updated" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - network: {} - users: - tenants: 1 - users_per_tenant: 1 - quotas: - neutron: - network: -1 - subnet: -1 - router: -1 - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_update_ports: - - - args: - network_create_args: {} - port_create_args: {} - ports_per_network: 5 - port_update_args: - admin_state_up: False - device_id: "dummy_id" - device_owner: "dummy_owner" - name: "_port_updated" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - network: {} - users: - tenants: 1 - users_per_tenant: 1 - quotas: - neutron: - network: -1 - port: -1 - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_delete_networks: - - - args: - network_create_args: {} - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - quotas: - neutron: - network: -1 - subnet: -1 - sla: - 
failure_rate: - max: 0 - - NeutronNetworks.create_and_delete_subnets: - - - args: - network_create_args: {} - subnet_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - subnets_per_network: 2 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - network: {} - users: - tenants: 1 - users_per_tenant: 1 - quotas: - neutron: - network: -1 - subnet: -1 - sla: - failure_rate: - max: 0 - - NeutronNetworks.create_and_delete_ports: - - - args: - network_create_args: {} - port_create_args: {} - ports_per_network: 10 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - network: {} - users: - tenants: 1 - users_per_tenant: 1 - quotas: - neutron: - network: -1 - port: -1 - sla: - failure_rate: - max: 0 - - CinderVolumes.create_and_upload_volume_to_image: - - - args: - size: 1 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - CinderVolumes.create_volume_backup: - - - args: - size: 1 - do_delete: True - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - CinderVolumes.create_and_restore_volume_backup: - - - args: - size: 1 - do_delete: True - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - CinderVolumes.create_and_list_volume_backups: - - - args: - size: 1 - detailed: True - do_delete: True - runner: - type: "constant" - times: 2 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - VMTasks.runcommand_heat: - - - args: - workload: - resource: ["rally.plugins.workload", "siege.py"] - username: "fedora" - template: /home/rally/.rally/extra/workload/wordpress_heat_template.yaml - files: - wp-instances.yaml: /home/rally/.rally/extra/workload/wp-instances.yaml - parameters: - wp_instances_count: 2 - wp_instance_type: gig - instance_type: gig - wp_image: fedora - image: fedora - network_id: {{external_net_id}} - context: - users: - tenants: 1 - users_per_tenant: 1 - flavors: - - name: gig - ram: 1024 - disk: 4 - vcpus: 1 - runner: - concurrency: 1 - timeout: 3000 - times: 1 - type: constant - sla: - failure_rate: - max: 100 - - GlanceImages.create_and_update_image: - - - args: - image_location: "{{ cirros_image_url }}" - container_format: "bare" - disk_format: "qcow2" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - api_versions: - glance: - version: 1 - sla: - failure_rate: - max: 100 diff --git a/rally-jobs/rally-murano.yaml b/rally-jobs/rally-murano.yaml deleted file mode 100644 index d346b79030..0000000000 --- a/rally-jobs/rally-murano.yaml +++ /dev/null @@ -1,146 +0,0 @@ ---- - MuranoEnvironments.list_environments: - - - runner: - type: "constant" - times: 30 - concurrency: 4 - context: - users: - tenants: 2 - users_per_tenant: 2 - murano_environments: - environments_per_tenant: 2 - sla: - failure_rate: - max: 0 - - MuranoEnvironments.create_and_delete_environment: - - - runner: - type: "constant" - times: 20 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - MuranoEnvironments.create_and_deploy_environment: - - - args: - packages_per_env: 2 - runner: - type: "constant" - times: 8 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - murano_packages: - app_package: 
"~/.rally/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter.zip" - roles: - - "admin" - sla: - failure_rate: - max: 0 - - - args: - packages_per_env: 2 - runner: - type: "constant" - times: 8 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - murano_packages: - app_package: "~/.rally/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/" - roles: - - "admin" - - MuranoPackages.import_and_list_packages: - - - args: - package: "~/.rally/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - - args: - package: "~/.rally/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter.zip" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - MuranoPackages.import_and_delete_package: - - - args: - package: "~/.rally/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - MuranoPackages.import_and_filter_applications: - - - args: - package: "~/.rally/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/" - filter_query: {"category" : "Web"} - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - MuranoPackages.package_lifecycle: - - - args: - package: "~/.rally/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/" - body: {"categories": ["Web"]} - operation: "add" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/rally-jobs/rally-neutron-extensions.yaml b/rally-jobs/rally-neutron-extensions.yaml deleted file mode 100644 index 44ad69f805..0000000000 --- a/rally-jobs/rally-neutron-extensions.yaml +++ /dev/null @@ -1,148 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NeutronLoadbalancerV2.create_and_list_loadbalancers: - - - args: - lb_create_args: {} - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 - network: {} - sla: - failure_rate: - max: 0 - - NeutronBGPVPN.create_and_list_bgpvpns: - - - runner: - type: "constant" - times: 8 - concurrency: 4 - context: - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - NeutronBGPVPN.create_and_update_bgpvpns: - - - runner: - type: "constant" - times: 8 - concurrency: 4 - context: - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - NeutronBGPVPN.create_and_delete_bgpvpns: - - - runner: - type: "constant" - times: 8 - concurrency: 4 - context: - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - NeutronBGPVPN.create_bgpvpn_assoc_disassoc_networks: - - - runner: - type: "constant" - times: 8 - concurrency: 4 - context: - users: - tenants: 2 - users_per_tenant: 1 - network: {} - servers: - servers_per_tenant: 1 - auto_assign_nic: True - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - sla: - failure_rate: - max: 0 - - NeutronBGPVPN.create_bgpvpn_assoc_disassoc_routers: - - - runner: - type: "constant" - times: 8 - concurrency: 4 - context: - users: - tenants: 2 - 
users_per_tenant: 1 - network: {} - servers: - servers_per_tenant: 1 - auto_assign_nic: True - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - sla: - failure_rate: - max: 0 - - NeutronBGPVPN.create_and_list_networks_associations: - - - runner: - type: "constant" - times: 8 - concurrency: 4 - context: - users: - tenants: 2 - users_per_tenant: 1 - network: {} - servers: - servers_per_tenant: 1 - auto_assign_nic: True - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - sla: - failure_rate: - max: 0 - - NeutronBGPVPN.create_and_list_routers_associations: - - - runner: - type: "constant" - times: 8 - concurrency: 4 - context: - users: - tenants: 2 - users_per_tenant: 1 - network: {} - servers: - servers_per_tenant: 1 - auto_assign_nic: True - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - sla: - failure_rate: - max: 0 diff --git a/rally-jobs/rally-senlin.yaml b/rally-jobs/rally-senlin.yaml deleted file mode 100644 index 4d634f7876..0000000000 --- a/rally-jobs/rally-senlin.yaml +++ /dev/null @@ -1,30 +0,0 @@ ---- - version: 2 - title: Task for gate-rally-dsvm-senlin-rally-ubuntu-xenial-nv job - description: > - This task contains various scenarios for testing senlin plugins - subtasks: - - - title: SenlinClusters.create_and_delete_cluster tests - scenario: - SenlinClusters.create_and_delete_cluster: - desired_capacity: 3 - min_size: 0 - max_size: 5 - runner: - constant: - times: 3 - concurrency: 2 - contexts: - users: - tenants: 2 - users_per_tenant: 2 - profiles: - type: os.nova.server - version: "1.0" - properties: - name: cirros_server - flavor: 1 - image: "cirros-0.3.5-x86_64-disk" - networks: - - network: private diff --git a/rally-jobs/rally-zaqar.yaml b/rally-jobs/rally-zaqar.yaml deleted file mode 100644 index af2d5f6619..0000000000 --- a/rally-jobs/rally-zaqar.yaml +++ /dev/null @@ -1,24 +0,0 @@ ---- - version: 2 - title: Task for gate-rally-dsvm-zaqar-rally-ubuntu-xenial-nv job - description: > - This task contains various scenarios for testing zaqar plugins - subtasks: - - - title: ZaqarBasic.create_queue test - scenario: - ZaqarBasic.create_queue: {} - runner: - constant: - times: 100 - concurrency: 10 - - - title: ZaqarBasic.producer_consumer test - scenario: - ZaqarBasic.producer_consumer: - min_msg_count: 50 - max_msg_count: 200 - runner: - constant: - times: 100 - concurrency: 10 diff --git a/rally-jobs/telemetry-neutron.yaml b/rally-jobs/telemetry-neutron.yaml deleted file mode 100644 index e71d7f467e..0000000000 --- a/rally-jobs/telemetry-neutron.yaml +++ /dev/null @@ -1,429 +0,0 @@ -{% set image_name = "^cirros.*-disk$" %} -{% set flavor_name = "m1.nano" %} -{% set smoke = 0 %} ---- - - CeilometerEvents.create_user_and_get_event: - - - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - CeilometerEvents.create_user_and_list_event_types: - - - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - CeilometerEvents.create_user_and_list_events: - - - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - CeilometerTraits.create_user_and_list_trait_descriptions: - - - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - 
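Several of the removed job files, including telemetry-neutron.yaml above, open with Jinja2 `{% set %}` statements, so each file is a template that has to be rendered before the result is parsed as YAML. The sketch below illustrates that two-step loading under that assumption only; it is not Rally's actual task loader, and the file name is illustrative.

```python
import jinja2
import yaml


def load_task(path, **task_args):
    # Step 1: render the Jinja2 template ({% set %}, {% if %}, {{ var }}).
    with open(path) as f:
        rendered = jinja2.Template(f.read()).render(**task_args)
    # Step 2: parse the rendered text as plain YAML.
    return yaml.safe_load(rendered)


if __name__ == "__main__":
    # Illustrative path; any of the templated job files reads the same way.
    task = load_task("telemetry-neutron.yaml")
    print(sorted(task))
```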
CeilometerTraits.create_user_and_list_traits: - - - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - CeilometerMeters.list_meters: - - - runner: - type: constant - times: 10 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - ceilometer: - counter_name: "rally_meter" - counter_type: "gauge" - counter_unit: "%" - counter_volume: 100 - resources_per_tenant: 1 - samples_per_resource: 1 - timestamp_interval: 1 - sla: - failure_rate: - max: 0 - - CeilometerResource.list_resources: - - - runner: - type: constant - times: 10 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - ceilometer: - counter_name: "rally_meter" - counter_type: "gauge" - counter_unit: "%" - counter_volume: 100 - resources_per_tenant: 1 - samples_per_resource: 1 - timestamp_interval: 1 - sla: - failure_rate: - max: 0 - - CeilometerSamples.list_samples: - - - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - ceilometer: - counter_name: "cpu_util" - counter_type: "gauge" - counter_unit: "instance" - counter_volume: 1.0 - resources_per_tenant: 3 - samples_per_resource: 10 - timestamp_interval: 60 - metadata_list: - - status: "active" - name: "fake_resource" - deleted: "False" - created_at: "2015-09-04T12:34:19.000000" - - status: "not_active" - name: "fake_resource_1" - deleted: "False" - created_at: "2015-09-10T06:55:12.000000" - batch_size: 5 - sla: - failure_rate: - max: 0 - - CeilometerResource.get_tenant_resources: - - - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 2 - ceilometer: - counter_name: "cpu_util" - counter_type: "gauge" - counter_volume: 1.0 - counter_unit: "instance" - resources_per_tenant: 3 - sla: - failure_rate: - max: 0 - - CeilometerAlarms.create_alarm: - - - args: - meter_name: "ram_util" - threshold: 10.0 - type: "threshold" - statistic: "avg" - alarm_actions: ["http://localhost:8776/alarm"] - ok_actions: ["http://localhost:8776/ok"] - insufficient_data_actions: ["http://localhost:8776/notok"] - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - CeilometerAlarms.create_and_delete_alarm: - - - args: - meter_name: "ram_util" - threshold: 10.0 - type: "threshold" - statistic: "avg" - alarm_actions: ["http://localhost:8776/alarm"] - ok_actions: ["http://localhost:8776/ok"] - insufficient_data_actions: ["http://localhost:8776/notok"] - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - CeilometerAlarms.create_and_list_alarm: - - - args: - meter_name: "ram_util" - threshold: 10.0 - type: "threshold" - statistic: "avg" - alarm_actions: ["http://localhost:8776/alarm"] - ok_actions: ["http://localhost:8776/ok"] - insufficient_data_actions: ["http://localhost:8776/notok"] - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - CeilometerAlarms.create_and_get_alarm: - - - args: - meter_name: "ram_util" - threshold: 10.0 - type: "threshold" - statistic: "avg" - alarm_actions: ["http://localhost:8776/alarm"] - ok_actions: ["http://localhost:8776/ok"] - insufficient_data_actions: ["http://localhost:8776/notok"] - runner: - type: "constant" - times: 10 - concurrency: 2 - 
context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - CeilometerAlarms.create_and_update_alarm: - - - args: - meter_name: "ram_util" - threshold: 10.0 - type: "threshold" - statistic: "avg" - alarm_actions: ["http://localhost:8776/alarm"] - ok_actions: ["http://localhost:8776/ok"] - insufficient_data_actions: ["http://localhost:8776/notok"] - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - CeilometerAlarms.create_alarm_and_get_history: - - - args: - meter_name: "ram_util" - threshold: 10.0 - type: "threshold" - state: "ok" - statistic: "avg" - alarm_actions: ["http://localhost:8776/alarm"] - ok_actions: ["http://localhost:8776/ok"] - insufficient_data_actions: ["http://localhost:8776/notok"] - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - CeilometerAlarms.list_alarms: - - - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - CeilometerQueries.create_and_query_alarms: - - - args: - filter: {"and": [{"!=": {"state": "dummy_state"}},{"=": {"type": "threshold"}}]} - orderby: !!null - limit: 10 - meter_name: "ram_util" - threshold: 10.0 - type: "threshold" - statistic: "avg" - alarm_actions: ["http://localhost:8776/alarm"] - ok_actions: ["http://localhost:8776/ok"] - insufficient_data_actions: ["http://localhost:8776/notok"] - runner: - type: "constant" - times: 20 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - CeilometerQueries.create_and_query_alarm_history: - - - args: - orderby: !!null - limit: !!null - meter_name: "ram_util" - threshold: 10.0 - type: "threshold" - statistic: "avg" - alarm_actions: ["http://localhost:8776/alarm"] - ok_actions: ["http://localhost:8776/ok"] - insufficient_data_actions: ["http://localhost:8776/notok"] - runner: - type: "constant" - times: 20 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - CeilometerStats.get_stats: - - - runner: - type: constant - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - ceilometer: - counter_name: "rally_meter" - counter_type: "gauge" - counter_unit: "%" - counter_volume: 100 - resources_per_tenant: 5 - samples_per_resource: 5 - timestamp_interval: 10 - metadata_list: - - - status: "active" - name: "rally on" - deleted: "false" - - - status: "terminated" - name: "rally off" - deleted: "true" - args: - meter_name: "rally_meter" - filter_by_user_id: true - filter_by_project_id: true - filter_by_resource_id: true - metadata_query: - status: "terminated" - period: 300 - groupby: "resource_id" - sla: - failure_rate: - max: 0 - - CeilometerQueries.create_and_query_samples: - - - args: - filter: {"=": {"counter_unit": "instance"}} - orderby: !!null - limit: 10 - counter_name: "cpu_util" - counter_type: "gauge" - counter_unit: "instance" - counter_volume: "1.0" - resource_id: "resource_id" - runner: - type: "constant" - times: 20 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/rally/api.py b/rally/api.py index 600f1c3f12..30595473d5 100644 --- a/rally/api.py +++ b/rally/api.py @@ -137,16 +137,6 @@ class _Deployment(APIGroup): def get(self, deployment): return self._get(deployment).to_dict() - 
-    def service_list(self, deployment):
-        """Get the services list.
-
-        :param deployment: Deployment object
-        :returns: Service list
-        """
-        # TODO(astudenov): make this method platform independent
-        admin = deployment.get_credentials_for("openstack")["admin"]
-        return admin.list_services()
-
     def list(self, status=None, parent_uuid=None, name=None):
         """Get the deployments list.
diff --git a/rally/common/objects/__init__.py b/rally/common/objects/__init__.py
index 8eb6995e46..af9ac0dd61 100644
--- a/rally/common/objects/__init__.py
+++ b/rally/common/objects/__init__.py
@@ -14,7 +14,6 @@
 # under the License.
 
 """Contains the Rally objects."""
-from rally.common.objects.credential import Credential  # noqa
 from rally.common.objects.deploy import Deployment  # noqa
 from rally.common.objects.task import Subtask  # noqa
 from rally.common.objects.task import Task  # noqa
diff --git a/rally/common/objects/credential.py b/rally/common/objects/credential.py
deleted file mode 100644
index 2643ddd479..0000000000
--- a/rally/common/objects/credential.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright 2014: Mirantis Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from rally.common import logging
-from rally.plugins.openstack import credential
-
-
-LOG = logging.getLogger(__name__)
-
-
-class Credential(credential.OpenStackCredential):
-    """Deprecated version of OpenStackCredential class"""
-    def __init__(self, *args, **kwargs):
-        super(Credential, self).__init__(*args, **kwargs)
-        LOG.warning("Class rally.common.objects.Credential is deprecated "
-                    "since Rally 0.11.0. Use raw dict for OpenStack "
-                    "credentials instead.")
-
-    def to_dict(self, include_permission=False):
-        dct = super(Credential, self).to_dict()
-        if not include_permission:
-            dct.pop("permission")
-        return dct
diff --git a/rally/common/objects/deploy.py b/rally/common/objects/deploy.py
index 2dea10f42c..86c2c3cca7 100644
--- a/rally/common/objects/deploy.py
+++ b/rally/common/objects/deploy.py
@@ -94,12 +94,6 @@ class Deployment(object):
         return self._env
 
     def __getitem__(self, key):
-        # TODO(astudenov): remove this in future releases
-        if key == "admin" or key == "users":
-            LOG.warning("deployment.%s is deprecated in Rally 0.9.0. "
-                        "Use deployment.get_credentials_for('openstack')"
-                        "['%s'] to get credentials." % (key, key))
-            return self.get_credentials_for("openstack")[key]
         if key == "status":
             status = self._env.status
             return _STATUS_NEW_TO_OLD.get(status, status)
@@ -160,16 +154,20 @@ class Deployment(object):
         all_credentials = {}
         for platform, credentials in self._all_credentials.items():
             if platform == "openstack":
-                from rally.plugins.openstack import credential
-
-                admin = credentials[0]["admin"]
-                if admin:
-                    admin = credential.OpenStackCredential(
-                        permission=consts.EndpointPermission.ADMIN, **admin)
-                all_credentials[platform] = [{
-                    "admin": admin,
-                    "users": [credential.OpenStackCredential(**user)
-                              for user in credentials[0]["users"]]}]
+                try:
+                    from rally_openstack import credential
+                except ImportError:
+                    all_credentials[platform] = credentials
+                else:
+                    admin = credentials[0]["admin"]
+                    if admin:
+                        admin = credential.OpenStackCredential(
+                            permission=consts.EndpointPermission.ADMIN,
+                            **admin)
+                    all_credentials[platform] = [{
+                        "admin": admin,
+                        "users": [credential.OpenStackCredential(**user)
+                                  for user in credentials[0]["users"]]}]
             else:
                 all_credentials[platform] = credentials
         return all_credentials
diff --git a/rally/common/opts.py b/rally/common/opts.py
index 993d59e48b..42d8cb5110 100644
--- a/rally/common/opts.py
+++ b/rally/common/opts.py
@@ -16,7 +16,6 @@ import importlib
 
 from rally.common import cfg
 from rally.common import logging
-from rally.plugins.openstack.cfg import opts as openstack_opts
 from rally.task import engine
 
 CONF = cfg.CONF
@@ -25,10 +24,6 @@ CONF = cfg.CONF
 
 
 def list_opts():
     merged_opts = {"DEFAULT": []}
-    for category, options in openstack_opts.list_opts().items():
-        merged_opts.setdefault(category, [])
-        merged_opts[category].extend(options)
-
     merged_opts["DEFAULT"].extend(logging.DEBUG_OPTS)
     merged_opts["DEFAULT"].extend(engine.TASK_ENGINE_OPTS)
diff --git a/rally/osclients.py b/rally/osclients.py
deleted file mode 100644
index 52d73ba382..0000000000
--- a/rally/osclients.py
+++ /dev/null
@@ -1,9 +0,0 @@
-from rally.common import logging
-
-
-LOG = logging.getLogger(__name__)
-LOG.warning("rally.osclients module moved to rally.plugins.openstack.osclients"
-            "rally.osclients module is going to be removed.")
-
-
-from rally.plugins.openstack.osclients import *  # noqa
diff --git a/rally/plugins/common/types.py b/rally/plugins/common/types.py
index f3c0c639ac..ca349dfe00 100644
--- a/rally/plugins/common/types.py
+++ b/rally/plugins/common/types.py
@@ -22,7 +22,7 @@ from rally.task import types
 
 
 @plugin.configure(name="path_or_url")
-class PathOrUrl(types.ResourceType, types.DeprecatedBehaviourMixin):
+class PathOrUrl(types.ResourceType):
     """Check whether file exists or url available."""
 
     def pre_process(self, resource_spec, config):
@@ -41,7 +41,7 @@ class PathOrUrl(types.ResourceType, types.DeprecatedBehaviourMixin):
 
 
 @plugin.configure(name="file")
-class FileType(types.ResourceType, types.DeprecatedBehaviourMixin):
+class FileType(types.ResourceType):
     """Return content of the file by its path."""
 
     def pre_process(self, resource_spec, config):
@@ -50,7 +50,7 @@ class FileType(types.ResourceType, types.DeprecatedBehaviourMixin):
 
 
 @plugin.configure(name="expand_user_path")
-class ExpandUserPath(types.ResourceType, types.DeprecatedBehaviourMixin):
+class ExpandUserPath(types.ResourceType):
     """Expands user path."""
 
     def pre_process(self, resource_spec, config):
@@ -58,7 +58,7 @@ class ExpandUserPath(types.ResourceType, types.DeprecatedBehaviourMixin):
 
 
 @plugin.configure(name="file_dict")
-class FileTypeDict(types.ResourceType, types.DeprecatedBehaviourMixin):
+class FileTypeDict(types.ResourceType): """Return the dictionary of items with file path and file content.""" def pre_process(self, resource_spec, config): diff --git a/rally/plugins/openstack/__init__.py b/rally/plugins/openstack/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/cfg/__init__.py b/rally/plugins/openstack/cfg/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/cfg/cinder.py b/rally/plugins/openstack/cfg/cinder.py deleted file mode 100644 index e1f0721763..0000000000 --- a/rally/plugins/openstack/cfg/cinder.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import cfg - -OPTS = {"openstack": [ - cfg.FloatOpt("cinder_volume_create_prepoll_delay", - default=2.0, - deprecated_group="benchmark", - help="Time to sleep after creating a resource before" - " polling for it status"), - cfg.FloatOpt("cinder_volume_create_timeout", - default=600.0, - deprecated_group="benchmark", - help="Time to wait for cinder volume to be created."), - cfg.FloatOpt("cinder_volume_create_poll_interval", - default=2.0, - deprecated_group="benchmark", - help="Interval between checks when waiting for volume" - " creation."), - cfg.FloatOpt("cinder_volume_delete_timeout", - default=600.0, - deprecated_group="benchmark", - help="Time to wait for cinder volume to be deleted."), - cfg.FloatOpt("cinder_volume_delete_poll_interval", - default=2.0, - deprecated_group="benchmark", - help="Interval between checks when waiting for volume" - " deletion."), - cfg.FloatOpt("cinder_backup_restore_timeout", - default=600.0, - deprecated_group="benchmark", - help="Time to wait for cinder backup to be restored."), - cfg.FloatOpt("cinder_backup_restore_poll_interval", - default=2.0, - deprecated_group="benchmark", - help="Interval between checks when waiting for backup" - " restoring."), -]} diff --git a/rally/plugins/openstack/cfg/cleanup.py b/rally/plugins/openstack/cfg/cleanup.py deleted file mode 100644 index 1a18bc2968..0000000000 --- a/rally/plugins/openstack/cfg/cleanup.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
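The cfg modules deleted in this part of the change all share one shape: an OPTS dictionary keyed by option group and filled with oslo.config option objects that carry a default, a deprecated group and a help string. The sketch below shows that pattern with oslo.config used directly; the option name is made up for illustration, and the real modules were registered through Rally's option-loading code in rally/common/opts.py rather than like this.

```python
from oslo_config import cfg

# Same shape as the deleted modules: {"<group>": [<option objects>]}.
OPTS = {"openstack": [
    cfg.FloatOpt("example_volume_create_timeout",
                 default=600.0,
                 deprecated_group="benchmark",
                 help="Illustrative timeout option, in seconds."),
]}

CONF = cfg.CONF
for group, options in OPTS.items():
    # Register each option list under its group so it becomes readable
    # as CONF.<group>.<option_name>.
    CONF.register_opts(options, group=group)

if __name__ == "__main__":
    CONF([])  # parse an empty command line so defaults become available
    print(CONF.openstack.example_volume_create_timeout)
```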
- -from rally.common import cfg - -OPTS = {"openstack": [ - cfg.IntOpt("resource_deletion_timeout", - default=600, - deprecated_group="cleanup", - help="A timeout in seconds for deleting resources"), - cfg.IntOpt("cleanup_threads", - default=20, - deprecated_group="cleanup", - help="Number of cleanup threads to run") -]} diff --git a/rally/plugins/openstack/cfg/ec2.py b/rally/plugins/openstack/cfg/ec2.py deleted file mode 100644 index b758027727..0000000000 --- a/rally/plugins/openstack/cfg/ec2.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import cfg - -OPTS = {"openstack": [ - cfg.FloatOpt( - "ec2_server_boot_prepoll_delay", - default=1.0, - deprecated_group="benchmark", - help="Time to sleep after boot before polling for status" - ), - cfg.FloatOpt( - "ec2_server_boot_timeout", - default=300.0, - deprecated_group="benchmark", - help="Server boot timeout" - ), - cfg.FloatOpt( - "ec2_server_boot_poll_interval", - default=1.0, - deprecated_group="benchmark", - help="Server boot poll interval" - ) -]} diff --git a/rally/plugins/openstack/cfg/glance.py b/rally/plugins/openstack/cfg/glance.py deleted file mode 100644 index a6170f52ba..0000000000 --- a/rally/plugins/openstack/cfg/glance.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from rally.common import cfg - -OPTS = {"openstack": [ - cfg.FloatOpt("glance_image_delete_timeout", - default=120.0, - deprecated_group="benchmark", - help="Time to wait for glance image to be deleted."), - cfg.FloatOpt("glance_image_delete_poll_interval", - default=1.0, - deprecated_group="benchmark", - help="Interval between checks when waiting for image " - "deletion."), - cfg.FloatOpt("glance_image_create_prepoll_delay", - default=2.0, - deprecated_group="benchmark", - help="Time to sleep after creating a resource before " - "polling for it status"), - cfg.FloatOpt("glance_image_create_timeout", - default=120.0, - deprecated_group="benchmark", - help="Time to wait for glance image to be created."), - cfg.FloatOpt("glance_image_create_poll_interval", - default=1.0, - deprecated_group="benchmark", - help="Interval between checks when waiting for image " - "creation."), - cfg.FloatOpt("glance_image_create_prepoll_delay", - default=2.0, - deprecated_group="benchmark", - help="Time to sleep after creating a resource before " - "polling for it status"), - cfg.FloatOpt("glance_image_create_poll_interval", - default=1.0, - deprecated_group="benchmark", - help="Interval between checks when waiting for image " - "creation.") -]} diff --git a/rally/plugins/openstack/cfg/heat.py b/rally/plugins/openstack/cfg/heat.py deleted file mode 100644 index b8979f4d8a..0000000000 --- a/rally/plugins/openstack/cfg/heat.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from rally.common import cfg - -OPTS = {"openstack": [ - cfg.FloatOpt("heat_stack_create_prepoll_delay", - default=2.0, - deprecated_group="benchmark", - help="Time(in sec) to sleep after creating a resource before " - "polling for it status."), - cfg.FloatOpt("heat_stack_create_timeout", - default=3600.0, - deprecated_group="benchmark", - help="Time(in sec) to wait for heat stack to be created."), - cfg.FloatOpt("heat_stack_create_poll_interval", - default=1.0, - deprecated_group="benchmark", - help="Time interval(in sec) between checks when waiting for " - "stack creation."), - cfg.FloatOpt("heat_stack_delete_timeout", - default=3600.0, - deprecated_group="benchmark", - help="Time(in sec) to wait for heat stack to be deleted."), - cfg.FloatOpt("heat_stack_delete_poll_interval", - default=1.0, - deprecated_group="benchmark", - help="Time interval(in sec) between checks when waiting for " - "stack deletion."), - cfg.FloatOpt("heat_stack_check_timeout", - default=3600.0, - deprecated_group="benchmark", - help="Time(in sec) to wait for stack to be checked."), - cfg.FloatOpt("heat_stack_check_poll_interval", - default=1.0, - deprecated_group="benchmark", - help="Time interval(in sec) between checks when waiting for " - "stack checking."), - cfg.FloatOpt("heat_stack_update_prepoll_delay", - default=2.0, - deprecated_group="benchmark", - help="Time(in sec) to sleep after updating a resource before " - "polling for it status."), - cfg.FloatOpt("heat_stack_update_timeout", - default=3600.0, - deprecated_group="benchmark", - help="Time(in sec) to wait for stack to be updated."), - cfg.FloatOpt("heat_stack_update_poll_interval", - default=1.0, - deprecated_group="benchmark", - help="Time interval(in sec) between checks when waiting for " - "stack update."), - cfg.FloatOpt("heat_stack_suspend_timeout", - default=3600.0, - deprecated_group="benchmark", - help="Time(in sec) to wait for stack to be suspended."), - cfg.FloatOpt("heat_stack_suspend_poll_interval", - default=1.0, - deprecated_group="benchmark", - help="Time interval(in sec) between checks when waiting for " - "stack suspend."), - cfg.FloatOpt("heat_stack_resume_timeout", - default=3600.0, - deprecated_group="benchmark", - help="Time(in sec) to wait for stack to be resumed."), - cfg.FloatOpt("heat_stack_resume_poll_interval", - default=1.0, - deprecated_group="benchmark", - help="Time interval(in sec) between checks when waiting for " - "stack resume."), - cfg.FloatOpt("heat_stack_snapshot_timeout", - default=3600.0, - deprecated_group="benchmark", - help="Time(in sec) to wait for stack snapshot to " - "be created."), - cfg.FloatOpt("heat_stack_snapshot_poll_interval", - default=1.0, - deprecated_group="benchmark", - help="Time interval(in sec) between checks when waiting for " - "stack snapshot to be created."), - cfg.FloatOpt("heat_stack_restore_timeout", - default=3600.0, - deprecated_group="benchmark", - help="Time(in sec) to wait for stack to be restored from " - "snapshot."), - cfg.FloatOpt("heat_stack_restore_poll_interval", - default=1.0, - deprecated_group="benchmark", - help="Time interval(in sec) between checks when waiting for " - "stack to be restored."), - cfg.FloatOpt("heat_stack_scale_timeout", - default=3600.0, - deprecated_group="benchmark", - help="Time (in sec) to wait for stack to scale up or down."), - cfg.FloatOpt("heat_stack_scale_poll_interval", - default=1.0, - deprecated_group="benchmark", - help="Time interval (in sec) between checks when waiting for " - "a stack to scale up or down.") -]} diff --git 
a/rally/plugins/openstack/cfg/ironic.py b/rally/plugins/openstack/cfg/ironic.py deleted file mode 100644 index a0f3c62a02..0000000000 --- a/rally/plugins/openstack/cfg/ironic.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import cfg - -OPTS = {"openstack": [ - cfg.FloatOpt("ironic_node_create_poll_interval", - default=1.0, - deprecated_group="benchmark", - help="Interval(in sec) between checks when waiting for node " - "creation."), - cfg.FloatOpt("ironic_node_create_timeout", - default=300, - deprecated_group="benchmark", - help="Ironic node create timeout"), - cfg.FloatOpt("ironic_node_poll_interval", - default=1.0, - deprecated_group="benchmark", - help="Ironic node poll interval"), - cfg.FloatOpt("ironic_node_delete_timeout", - default=300, - deprecated_group="benchmark", - help="Ironic node delete timeout") -]} diff --git a/rally/plugins/openstack/cfg/keystone_roles.py b/rally/plugins/openstack/cfg/keystone_roles.py deleted file mode 100644 index 474816318b..0000000000 --- a/rally/plugins/openstack/cfg/keystone_roles.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import cfg - -OPTS = {"openstack": [ - cfg.IntOpt("roles_context_resource_management_workers", - default=30, - deprecated_name="resource_management_workers", - deprecated_group="roles_context", - help="How many concurrent threads to use for serving roles " - "context"), -]} diff --git a/rally/plugins/openstack/cfg/keystone_users.py b/rally/plugins/openstack/cfg/keystone_users.py deleted file mode 100644 index 1a88aa7343..0000000000 --- a/rally/plugins/openstack/cfg/keystone_users.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
- -from rally.common import cfg - - -OPTS = {"openstack": [ - cfg.IntOpt("users_context_resource_management_workers", - default=20, - deprecated_name="resource_management_workers", - deprecated_group="users_context", - help="The number of concurrent threads to use for serving " - "users context."), - cfg.StrOpt("project_domain", - default="default", - deprecated_group="users_context", - help="ID of domain in which projects will be created."), - cfg.StrOpt("user_domain", - default="default", - deprecated_group="users_context", - help="ID of domain in which users will be created."), - cfg.StrOpt("keystone_default_role", - default="member", - deprecated_group="users_context", - help="The default role name of the keystone to assign to " - "users.") -]} diff --git a/rally/plugins/openstack/cfg/magnum.py b/rally/plugins/openstack/cfg/magnum.py deleted file mode 100644 index 174bd683c9..0000000000 --- a/rally/plugins/openstack/cfg/magnum.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import cfg - -OPTS = {"openstack": [ - cfg.FloatOpt("magnum_cluster_create_prepoll_delay", - default=5.0, - deprecated_group="benchmark", - help="Time(in sec) to sleep after creating a resource before " - "polling for the status."), - cfg.FloatOpt("magnum_cluster_create_timeout", - default=2400.0, - deprecated_group="benchmark", - help="Time(in sec) to wait for magnum cluster to be " - "created."), - cfg.FloatOpt("magnum_cluster_create_poll_interval", - default=2.0, - deprecated_group="benchmark", - help="Time interval(in sec) between checks when waiting for " - "cluster creation."), - cfg.FloatOpt("k8s_pod_create_timeout", - default=1200.0, - deprecated_group="benchmark", - help="Time(in sec) to wait for k8s pod to be created."), - cfg.FloatOpt("k8s_pod_create_poll_interval", - default=1.0, - deprecated_group="benchmark", - help="Time interval(in sec) between checks when waiting for " - "k8s pod creation."), - cfg.FloatOpt("k8s_rc_create_timeout", - default=1200.0, - deprecated_group="benchmark", - help="Time(in sec) to wait for k8s rc to be created."), - cfg.FloatOpt("k8s_rc_create_poll_interval", - default=1.0, - deprecated_group="benchmark", - help="Time interval(in sec) between checks when waiting for " - "k8s rc creation.") -]} diff --git a/rally/plugins/openstack/cfg/manila.py b/rally/plugins/openstack/cfg/manila.py deleted file mode 100644 index 86614dcad6..0000000000 --- a/rally/plugins/openstack/cfg/manila.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import cfg - -OPTS = {"openstack": [ - cfg.FloatOpt( - "manila_share_create_prepoll_delay", - default=2.0, - deprecated_group="benchmark", - help="Delay between creating Manila share and polling for its " - "status."), - cfg.FloatOpt( - "manila_share_create_timeout", - default=300.0, - deprecated_group="benchmark", - help="Timeout for Manila share creation."), - cfg.FloatOpt( - "manila_share_create_poll_interval", - default=3.0, - deprecated_group="benchmark", - help="Interval between checks when waiting for Manila share " - "creation."), - cfg.FloatOpt( - "manila_share_delete_timeout", - default=180.0, - deprecated_group="benchmark", - help="Timeout for Manila share deletion."), - cfg.FloatOpt( - "manila_share_delete_poll_interval", - default=2.0, - deprecated_group="benchmark", - help="Interval between checks when waiting for Manila share " - "deletion."), - cfg.FloatOpt( - "manila_access_create_timeout", - default=300.0, - deprecated_group="benchmark", - help="Timeout for Manila access creation."), - cfg.FloatOpt( - "manila_access_create_poll_interval", - default=3.0, - deprecated_group="benchmark", - help="Interval between checks when waiting for Manila access " - "creation."), - cfg.FloatOpt( - "manila_access_delete_timeout", - default=180.0, - deprecated_group="benchmark", - help="Timeout for Manila access deletion."), - cfg.FloatOpt( - "manila_access_delete_poll_interval", - default=2.0, - deprecated_group="benchmark", - help="Interval between checks when waiting for Manila access " - "deletion."), -]} diff --git a/rally/plugins/openstack/cfg/mistral.py b/rally/plugins/openstack/cfg/mistral.py deleted file mode 100644 index 6c5abdb106..0000000000 --- a/rally/plugins/openstack/cfg/mistral.py +++ /dev/null @@ -1,23 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import cfg - -OPTS = {"openstack": [ - cfg.IntOpt( - "mistral_execution_timeout", - default=200, - deprecated_group="benchmark", - help="mistral execution timeout") -]} diff --git a/rally/plugins/openstack/cfg/monasca.py b/rally/plugins/openstack/cfg/monasca.py deleted file mode 100644 index 0f461ee035..0000000000 --- a/rally/plugins/openstack/cfg/monasca.py +++ /dev/null @@ -1,24 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import cfg - -OPTS = {"openstack": [ - cfg.FloatOpt( - "monasca_metric_create_prepoll_delay", - default=15.0, - deprecated_group="benchmark", - help="Delay between creating Monasca metrics and polling for " - "its elements.") -]} diff --git a/rally/plugins/openstack/cfg/murano.py b/rally/plugins/openstack/cfg/murano.py deleted file mode 100644 index 965167f19c..0000000000 --- a/rally/plugins/openstack/cfg/murano.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import cfg - -OPTS = {"openstack": [ - cfg.IntOpt("murano_deploy_environment_timeout", - default=1200, - deprecated_name="deploy_environment_timeout", - deprecated_group="benchmark", - help="A timeout in seconds for an environment deploy"), - cfg.IntOpt("murano_deploy_environment_check_interval", - default=5, - deprecated_name="deploy_environment_check_interval", - deprecated_group="benchmark", - help="Deploy environment check interval in seconds"), -]} diff --git a/rally/plugins/openstack/cfg/neutron.py b/rally/plugins/openstack/cfg/neutron.py deleted file mode 100644 index ad7dc2f36b..0000000000 --- a/rally/plugins/openstack/cfg/neutron.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import cfg - -OPTS = {"openstack": [ - cfg.FloatOpt("neutron_create_loadbalancer_timeout", - default=float(500), - deprecated_group="benchmark", - help="Neutron create loadbalancer timeout"), - cfg.FloatOpt("neutron_create_loadbalancer_poll_interval", - default=float(2), - deprecated_group="benchmark", - help="Neutron create loadbalancer poll interval"), - cfg.BoolOpt("pre_newton_neutron", - default=False, - help="Whether Neutron API is older then OpenStack Newton or " - "not. 
Based in this option, some external fields for " - "identifying resources can be applied.") -]} diff --git a/rally/plugins/openstack/cfg/nova.py b/rally/plugins/openstack/cfg/nova.py deleted file mode 100644 index 23269fbfa2..0000000000 --- a/rally/plugins/openstack/cfg/nova.py +++ /dev/null @@ -1,308 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import cfg - -OPTS = {"openstack": [ - # prepoll delay, timeout, poll interval - # "start": (0, 300, 1) - cfg.FloatOpt("nova_server_start_prepoll_delay", - default=0.0, - deprecated_group="benchmark", - help="Time to sleep after start before polling for status"), - cfg.FloatOpt("nova_server_start_timeout", - default=300.0, - deprecated_group="benchmark", - help="Server start timeout"), - cfg.FloatOpt("nova_server_start_poll_interval", - deprecated_group="benchmark", - default=1.0, - help="Server start poll interval"), - # "stop": (0, 300, 2) - cfg.FloatOpt("nova_server_stop_prepoll_delay", - default=0.0, - help="Time to sleep after stop before polling for status"), - cfg.FloatOpt("nova_server_stop_timeout", - default=300.0, - deprecated_group="benchmark", - help="Server stop timeout"), - cfg.FloatOpt("nova_server_stop_poll_interval", - default=2.0, - deprecated_group="benchmark", - help="Server stop poll interval"), - # "boot": (1, 300, 1) - cfg.FloatOpt("nova_server_boot_prepoll_delay", - default=1.0, - deprecated_group="benchmark", - help="Time to sleep after boot before polling for status"), - cfg.FloatOpt("nova_server_boot_timeout", - default=300.0, - deprecated_group="benchmark", - help="Server boot timeout"), - cfg.FloatOpt("nova_server_boot_poll_interval", - default=2.0, - deprecated_group="benchmark", - help="Server boot poll interval"), - # "delete": (2, 300, 2) - cfg.FloatOpt("nova_server_delete_prepoll_delay", - default=2.0, - deprecated_group="benchmark", - help="Time to sleep after delete before polling for status"), - cfg.FloatOpt("nova_server_delete_timeout", - default=300.0, - deprecated_group="benchmark", - help="Server delete timeout"), - cfg.FloatOpt("nova_server_delete_poll_interval", - default=2.0, - deprecated_group="benchmark", - help="Server delete poll interval"), - # "reboot": (2, 300, 2) - cfg.FloatOpt("nova_server_reboot_prepoll_delay", - default=2.0, - deprecated_group="benchmark", - help="Time to sleep after reboot before polling for status"), - cfg.FloatOpt("nova_server_reboot_timeout", - default=300.0, - deprecated_group="benchmark", - help="Server reboot timeout"), - cfg.FloatOpt("nova_server_reboot_poll_interval", - default=2.0, - deprecated_group="benchmark", - help="Server reboot poll interval"), - # "rebuild": (1, 300, 1) - cfg.FloatOpt("nova_server_rebuild_prepoll_delay", - default=1.0, - deprecated_group="benchmark", - help="Time to sleep after rebuild before polling for status"), - cfg.FloatOpt("nova_server_rebuild_timeout", - default=300.0, - deprecated_group="benchmark", - help="Server rebuild timeout"), - 
cfg.FloatOpt("nova_server_rebuild_poll_interval", - default=1.0, - deprecated_group="benchmark", - help="Server rebuild poll interval"), - # "rescue": (2, 300, 2) - cfg.FloatOpt("nova_server_rescue_prepoll_delay", - default=2.0, - deprecated_group="benchmark", - help="Time to sleep after rescue before polling for status"), - cfg.FloatOpt("nova_server_rescue_timeout", - default=300.0, - deprecated_group="benchmark", - help="Server rescue timeout"), - cfg.FloatOpt("nova_server_rescue_poll_interval", - default=2.0, - deprecated_group="benchmark", - help="Server rescue poll interval"), - # "unrescue": (2, 300, 2) - cfg.FloatOpt("nova_server_unrescue_prepoll_delay", - default=2.0, - deprecated_group="benchmark", - help="Time to sleep after unrescue " - "before polling for status"), - cfg.FloatOpt("nova_server_unrescue_timeout", - default=300.0, - deprecated_group="benchmark", - help="Server unrescue timeout"), - cfg.FloatOpt("nova_server_unrescue_poll_interval", - default=2.0, - deprecated_group="benchmark", - help="Server unrescue poll interval"), - # "suspend": (2, 300, 2) - cfg.FloatOpt("nova_server_suspend_prepoll_delay", - default=2.0, - deprecated_group="benchmark", - help="Time to sleep after suspend before polling for status"), - cfg.FloatOpt("nova_server_suspend_timeout", - default=300.0, - deprecated_group="benchmark", - help="Server suspend timeout"), - cfg.FloatOpt("nova_server_suspend_poll_interval", - default=2.0, - deprecated_group="benchmark", - help="Server suspend poll interval"), - # "resume": (2, 300, 2) - cfg.FloatOpt("nova_server_resume_prepoll_delay", - default=2.0, - deprecated_group="benchmark", - help="Time to sleep after resume before polling for status"), - cfg.FloatOpt("nova_server_resume_timeout", - default=300.0, - deprecated_group="benchmark", - help="Server resume timeout"), - cfg.FloatOpt("nova_server_resume_poll_interval", - default=2.0, - deprecated_group="benchmark", - help="Server resume poll interval"), - # "pause": (2, 300, 2) - cfg.FloatOpt("nova_server_pause_prepoll_delay", - default=2.0, - deprecated_group="benchmark", - help="Time to sleep after pause before polling for status"), - cfg.FloatOpt("nova_server_pause_timeout", - default=300.0, - deprecated_group="benchmark", - help="Server pause timeout"), - cfg.FloatOpt("nova_server_pause_poll_interval", - default=2.0, - deprecated_group="benchmark", - help="Server pause poll interval"), - # "unpause": (2, 300, 2) - cfg.FloatOpt("nova_server_unpause_prepoll_delay", - default=2.0, - deprecated_group="benchmark", - help="Time to sleep after unpause before polling for status"), - cfg.FloatOpt("nova_server_unpause_timeout", - default=300.0, - deprecated_group="benchmark", - help="Server unpause timeout"), - cfg.FloatOpt("nova_server_unpause_poll_interval", - default=2.0, - deprecated_group="benchmark", - help="Server unpause poll interval"), - # "shelve": (2, 300, 2) - cfg.FloatOpt("nova_server_shelve_prepoll_delay", - default=2.0, - deprecated_group="benchmark", - help="Time to sleep after shelve before polling for status"), - cfg.FloatOpt("nova_server_shelve_timeout", - default=300.0, - deprecated_group="benchmark", - help="Server shelve timeout"), - cfg.FloatOpt("nova_server_shelve_poll_interval", - default=2.0, - deprecated_group="benchmark", - help="Server shelve poll interval"), - # "unshelve": (2, 300, 2) - cfg.FloatOpt("nova_server_unshelve_prepoll_delay", - default=2.0, - deprecated_group="benchmark", - help="Time to sleep after unshelve before " - "polling for status"), - 
cfg.FloatOpt("nova_server_unshelve_timeout", - default=300.0, - deprecated_group="benchmark", - help="Server unshelve timeout"), - cfg.FloatOpt("nova_server_unshelve_poll_interval", - default=2.0, - deprecated_group="benchmark", - help="Server unshelve poll interval"), - # "image_create": (0, 300, 2) - cfg.FloatOpt("nova_server_image_create_prepoll_delay", - default=0.0, - deprecated_group="benchmark", - help="Time to sleep after image_create before polling" - " for status"), - cfg.FloatOpt("nova_server_image_create_timeout", - default=300.0, - deprecated_group="benchmark", - help="Server image_create timeout"), - cfg.FloatOpt("nova_server_image_create_poll_interval", - default=2.0, - deprecated_group="benchmark", - help="Server image_create poll interval"), - # "image_delete": (0, 300, 2) - cfg.FloatOpt("nova_server_image_delete_prepoll_delay", - default=0.0, - deprecated_group="benchmark", - help="Time to sleep after image_delete before polling" - " for status"), - cfg.FloatOpt("nova_server_image_delete_timeout", - default=300.0, - deprecated_group="benchmark", - help="Server image_delete timeout"), - cfg.FloatOpt("nova_server_image_delete_poll_interval", - default=2.0, - deprecated_group="benchmark", - help="Server image_delete poll interval"), - # "resize": (2, 400, 5) - cfg.FloatOpt("nova_server_resize_prepoll_delay", - default=2.0, - deprecated_group="benchmark", - help="Time to sleep after resize before polling for status"), - cfg.FloatOpt("nova_server_resize_timeout", - default=400.0, - deprecated_group="benchmark", - help="Server resize timeout"), - cfg.FloatOpt("nova_server_resize_poll_interval", - default=5.0, - deprecated_group="benchmark", - help="Server resize poll interval"), - # "resize_confirm": (0, 200, 2) - cfg.FloatOpt("nova_server_resize_confirm_prepoll_delay", - default=0.0, - deprecated_group="benchmark", - help="Time to sleep after resize_confirm before polling" - " for status"), - cfg.FloatOpt("nova_server_resize_confirm_timeout", - default=200.0, - deprecated_group="benchmark", - help="Server resize_confirm timeout"), - cfg.FloatOpt("nova_server_resize_confirm_poll_interval", - default=2.0, - deprecated_group="benchmark", - help="Server resize_confirm poll interval"), - # "resize_revert": (0, 200, 2) - cfg.FloatOpt("nova_server_resize_revert_prepoll_delay", - default=0.0, - deprecated_group="benchmark", - help="Time to sleep after resize_revert before polling" - " for status"), - cfg.FloatOpt("nova_server_resize_revert_timeout", - default=200.0, - deprecated_group="benchmark", - help="Server resize_revert timeout"), - cfg.FloatOpt("nova_server_resize_revert_poll_interval", - default=2.0, - deprecated_group="benchmark", - help="Server resize_revert poll interval"), - # "live_migrate": (1, 400, 2) - cfg.FloatOpt("nova_server_live_migrate_prepoll_delay", - default=1.0, - deprecated_group="benchmark", - help="Time to sleep after live_migrate before polling" - " for status"), - cfg.FloatOpt("nova_server_live_migrate_timeout", - default=400.0, - deprecated_group="benchmark", - help="Server live_migrate timeout"), - cfg.FloatOpt("nova_server_live_migrate_poll_interval", - default=2.0, - deprecated_group="benchmark", - help="Server live_migrate poll interval"), - # "migrate": (1, 400, 2) - cfg.FloatOpt("nova_server_migrate_prepoll_delay", - default=1.0, - deprecated_group="benchmark", - help="Time to sleep after migrate before polling for status"), - cfg.FloatOpt("nova_server_migrate_timeout", - default=400.0, - deprecated_group="benchmark", - help="Server migrate 
timeout"), - cfg.FloatOpt("nova_server_migrate_poll_interval", - default=2.0, - deprecated_group="benchmark", - help="Server migrate poll interval"), - # "detach": - cfg.FloatOpt("nova_detach_volume_timeout", - default=200.0, - deprecated_group="benchmark", - help="Nova volume detach timeout"), - cfg.FloatOpt("nova_detach_volume_poll_interval", - default=2.0, - deprecated_group="benchmark", - help="Nova volume detach poll interval") -]} diff --git a/rally/plugins/openstack/cfg/opts.py b/rally/plugins/openstack/cfg/opts.py deleted file mode 100644 index edab940f23..0000000000 --- a/rally/plugins/openstack/cfg/opts.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.plugins.openstack.cfg import cinder -from rally.plugins.openstack.cfg import ec2 -from rally.plugins.openstack.cfg import glance -from rally.plugins.openstack.cfg import heat -from rally.plugins.openstack.cfg import ironic -from rally.plugins.openstack.cfg import magnum -from rally.plugins.openstack.cfg import manila -from rally.plugins.openstack.cfg import mistral -from rally.plugins.openstack.cfg import monasca -from rally.plugins.openstack.cfg import murano -from rally.plugins.openstack.cfg import neutron -from rally.plugins.openstack.cfg import nova -from rally.plugins.openstack.cfg import osclients -from rally.plugins.openstack.cfg import profiler -from rally.plugins.openstack.cfg import sahara -from rally.plugins.openstack.cfg import senlin -from rally.plugins.openstack.cfg import vm -from rally.plugins.openstack.cfg import watcher - -from rally.plugins.openstack.cfg import tempest - -from rally.plugins.openstack.cfg import keystone_roles -from rally.plugins.openstack.cfg import keystone_users - -from rally.plugins.openstack.cfg import cleanup - - -def list_opts(): - - opts = {} - for l_opts in (cinder.OPTS, ec2.OPTS, heat.OPTS, ironic.OPTS, magnum.OPTS, - manila.OPTS, mistral.OPTS, monasca.OPTS, murano.OPTS, - nova.OPTS, osclients.OPTS, profiler.OPTS, sahara.OPTS, - vm.OPTS, glance.OPTS, watcher.OPTS, tempest.OPTS, - keystone_roles.OPTS, keystone_users.OPTS, cleanup.OPTS, - senlin.OPTS, neutron.OPTS): - for category, opt in l_opts.items(): - opts.setdefault(category, []) - opts[category].extend(opt) - return opts diff --git a/rally/plugins/openstack/cfg/osclients.py b/rally/plugins/openstack/cfg/osclients.py deleted file mode 100644 index ce9ab0ffdc..0000000000 --- a/rally/plugins/openstack/cfg/osclients.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright 2017: GoDaddy Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import cfg - - -OPTS = { - "DEFAULT": [ - cfg.FloatOpt( - "openstack_client_http_timeout", - default=180.0, - help="HTTP timeout for any of OpenStack service in seconds") - ] -} diff --git a/rally/plugins/openstack/cfg/profiler.py b/rally/plugins/openstack/cfg/profiler.py deleted file mode 100644 index 06362cbdbf..0000000000 --- a/rally/plugins/openstack/cfg/profiler.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2017: Inria. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import cfg - -OPTS = {"openstack": [ - cfg.BoolOpt("enable_profiler", - default=True, - deprecated_group="benchmark", - help="Enable or disable osprofiler to trace the scenarios") -]} diff --git a/rally/plugins/openstack/cfg/sahara.py b/rally/plugins/openstack/cfg/sahara.py deleted file mode 100644 index 4123fea863..0000000000 --- a/rally/plugins/openstack/cfg/sahara.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from rally.common import cfg - -OPTS = {"openstack": [ - cfg.IntOpt("sahara_cluster_create_timeout", - default=1800, - deprecated_group="benchmark", - help="A timeout in seconds for a cluster create operation"), - cfg.IntOpt("sahara_cluster_delete_timeout", - default=900, - deprecated_group="benchmark", - help="A timeout in seconds for a cluster delete operation"), - cfg.IntOpt("sahara_cluster_check_interval", - default=5, - deprecated_group="benchmark", - help="Cluster status polling interval in seconds"), - cfg.IntOpt("sahara_job_execution_timeout", - default=600, - deprecated_group="benchmark", - help="A timeout in seconds for a Job Execution to complete"), - cfg.IntOpt("sahara_job_check_interval", - default=5, - deprecated_group="benchmark", - help="Job Execution status polling interval in seconds"), - cfg.IntOpt("sahara_workers_per_proxy", - default=20, - deprecated_group="benchmark", - help="Amount of workers one proxy should serve to.") -]} diff --git a/rally/plugins/openstack/cfg/senlin.py b/rally/plugins/openstack/cfg/senlin.py deleted file mode 100644 index e5490e1c1e..0000000000 --- a/rally/plugins/openstack/cfg/senlin.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import cfg - -OPTS = {"openstack": [ - cfg.FloatOpt("senlin_action_timeout", - default=3600, - deprecated_group="benchmark", - help="Time in seconds to wait for senlin action to finish.") -]} diff --git a/rally/plugins/openstack/cfg/tempest.py b/rally/plugins/openstack/cfg/tempest.py deleted file mode 100644 index 9806b16b03..0000000000 --- a/rally/plugins/openstack/cfg/tempest.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from rally.common import cfg - -OPTS = {"openstack": [ - cfg.StrOpt("img_url", - default="http://download.cirros-cloud.net/" - "0.3.5/cirros-0.3.5-x86_64-disk.img", - deprecated_group="tempest", - help="image URL"), - cfg.StrOpt("img_disk_format", - default="qcow2", - deprecated_group="tempest", - help="Image disk format to use when creating the image"), - cfg.StrOpt("img_container_format", - default="bare", - deprecated_group="tempest", - help="Image container format to use when creating the image"), - cfg.StrOpt("img_name_regex", - default="^.*(cirros|testvm).*$", - deprecated_group="tempest", - help="Regular expression for name of a public image to " - "discover it in the cloud and use it for the tests. " - "Note that when Rally is searching for the image, case " - "insensitive matching is performed. Specify nothing " - "('img_name_regex =') if you want to disable discovering. " - "In this case Rally will create needed resources by " - "itself if the values for the corresponding config " - "options are not specified in the Tempest config file"), - cfg.StrOpt("swift_operator_role", - default="Member", - deprecated_group="tempest", - help="Role required for users " - "to be able to create Swift containers"), - cfg.StrOpt("swift_reseller_admin_role", - default="ResellerAdmin", - deprecated_group="tempest", - help="User role that has reseller admin"), - cfg.StrOpt("heat_stack_owner_role", - default="heat_stack_owner", - deprecated_group="tempest", - help="Role required for users " - "to be able to manage Heat stacks"), - cfg.StrOpt("heat_stack_user_role", - default="heat_stack_user", - deprecated_group="tempest", - help="Role for Heat template-defined users"), - cfg.IntOpt("flavor_ref_ram", - default="64", - deprecated_group="tempest", - help="Primary flavor RAM size used by most of the test cases"), - cfg.IntOpt("flavor_ref_alt_ram", - default="128", - deprecated_group="tempest", - help="Alternate reference flavor RAM size used by tests that " - "need two flavors, like those that resize an instance"), - cfg.IntOpt("heat_instance_type_ram", - default="64", - deprecated_group="tempest", - help="RAM size flavor used for orchestration test cases") -]} diff --git a/rally/plugins/openstack/cfg/vm.py b/rally/plugins/openstack/cfg/vm.py deleted file mode 100644 index 8fb6f2143a..0000000000 --- a/rally/plugins/openstack/cfg/vm.py +++ /dev/null @@ -1,27 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
- -from rally.common import cfg - -OPTS = {"openstack": [ - cfg.FloatOpt("vm_ping_poll_interval", - default=1.0, - deprecated_group="benchmark", - help="Interval between checks when waiting for a VM to " - "become pingable"), - cfg.FloatOpt("vm_ping_timeout", - default=120.0, - deprecated_group="benchmark", - help="Time to wait for a VM to become pingable") -]} diff --git a/rally/plugins/openstack/cfg/watcher.py b/rally/plugins/openstack/cfg/watcher.py deleted file mode 100644 index 615bf1b144..0000000000 --- a/rally/plugins/openstack/cfg/watcher.py +++ /dev/null @@ -1,26 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import cfg - -OPTS = {"openstack": [ - cfg.FloatOpt("watcher_audit_launch_poll_interval", - default=2.0, - deprecated_group="benchmark", - help="Watcher audit launch interval"), - cfg.IntOpt("watcher_audit_launch_timeout", - default=300, - deprecated_group="benchmark", - help="Watcher audit launch timeout") -]} diff --git a/rally/plugins/openstack/cleanup/__init__.py b/rally/plugins/openstack/cleanup/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/cleanup/base.py b/rally/plugins/openstack/cleanup/base.py deleted file mode 100644 index 7623058d94..0000000000 --- a/rally/plugins/openstack/cleanup/base.py +++ /dev/null @@ -1,133 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from rally.common import cfg -from rally.task import utils - -CONF = cfg.CONF - -cleanup_group = cfg.OptGroup(name="cleanup", title="Cleanup Options") - - -# NOTE(andreykurilin): There are cases when there is no way to use any kind -# of "name" for resource as an identifier of alignment resource to the -# particular task run and even to Rally itself. Previously, we used empty -# strings as a workaround for name matching specific templates, but -# theoretically such behaviour can hide other cases when resource should have -# a name property, but it is missed. -# Let's use instances of specific class to return as a name of resources -# which do not have names at all. 
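The note above is easier to see next to a concrete plugin. Below is a minimal sketch of a cleanup resource class for a resource type that has no name field at all, returning the NoName marker defined just after this note; the "examples"/"things" service and resource names and the ExampleThing class are hypothetical, not taken from the removed module.

from rally.plugins.openstack.cleanup import base


@base.resource("examples", "things", order=500, tenant_resource=True)
class ExampleThing(base.ResourceManager):

    def name(self):
        # The raw resource exposes no name attribute, so return a NoName
        # marker instead of an empty string; the cleanup manager skips name
        # matching for NoName and always considers the resource for deletion.
        return base.NoName(self._resource)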
-class NoName(object): - def __init__(self, resource_type): - self.resource_type = resource_type - - def __repr__(self): - return "<NoName %s>" % self.resource_type - - -def resource(service, resource, order=0, admin_required=False, - perform_for_admin_only=False, tenant_resource=False, - max_attempts=3, timeout=CONF.openstack.resource_deletion_timeout, - interval=1, threads=CONF.openstack.cleanup_threads): - """Decorator that overrides resource specification. - - Just put it on top of your resource class and specify arguments that you - need. - - :param service: It is equal to client name for corresponding service. - E.g. "nova", "cinder" or "zaqar" - :param resource: Client manager name for resource. E.g. in case of - nova.servers you should write here "servers" - :param order: Used to adjust priority of cleanup for different resource - types - :param admin_required: Admin user is required - :param perform_for_admin_only: Perform cleanup for admin user only - :param tenant_resource: Perform deletion only 1 time per tenant - :param max_attempts: Max amount of attempts to delete single resource - :param timeout: Max duration of deletion in seconds - :param interval: Resource status polling interval - :param threads: Amount of threads (workers) that are deleting resources - simultaneously - """ - - def inner(cls): - # TODO(boris-42): This can be written better I believe =) - cls._service = service - cls._resource = resource - cls._order = order - cls._admin_required = admin_required - cls._perform_for_admin_only = perform_for_admin_only - cls._max_attempts = max_attempts - cls._timeout = timeout - cls._interval = interval - cls._threads = threads - cls._tenant_resource = tenant_resource - - return cls - - return inner - - -@resource(service=None, resource=None) -class ResourceManager(object): - """Base class for cleanup plugins for specific resources. - - You should use @resource decorator to specify major configuration of - resource manager. Usually you should specify: service, resource and order. - - If the project's python client is very specific, you can override delete(), - list() and is_deleted() methods to make them fit your case. - """ - - def __init__(self, resource=None, admin=None, user=None, tenant_uuid=None): - self.admin = admin - self.user = user - self.raw_resource = resource - self.tenant_uuid = tenant_uuid - - def _manager(self): - client = self._admin_required and self.admin or self.user - return getattr(getattr(client, self._service)(), self._resource) - - def id(self): - """Returns id of resource.""" - return self.raw_resource.id - - def name(self): - """Returns name of resource.""" - return self.raw_resource.name - - def is_deleted(self): - """Checks if the resource is deleted. - - Fetch resource by id from service and check its status. - In case of NotFound or status is DELETED or DELETE_COMPLETE returns - True, otherwise False.
- """ - try: - resource = self._manager().get(self.id()) - except Exception as e: - return getattr(e, "code", getattr(e, "http_status", 400)) == 404 - - return utils.get_status(resource) in ("DELETED", "DELETE_COMPLETE") - - def delete(self): - """Delete resource that corresponds to instance of this class.""" - self._manager().delete(self.id()) - - def list(self): - """List all resources specific for admin or user.""" - return self._manager().list() diff --git a/rally/plugins/openstack/cleanup/manager.py b/rally/plugins/openstack/cleanup/manager.py deleted file mode 100644 index 44d80e3ec7..0000000000 --- a/rally/plugins/openstack/cleanup/manager.py +++ /dev/null @@ -1,285 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import time - -from rally.common import broker -from rally.common import logging -from rally.common.plugin import discover -from rally.common.plugin import plugin -from rally.common import utils as rutils -from rally.plugins.openstack.cleanup import base - - -LOG = logging.getLogger(__name__) - - -class SeekAndDestroy(object): - - def __init__(self, manager_cls, admin, users, api_versions=None, - resource_classes=None, task_id=None): - """Resource deletion class. - - This class contains method exterminate() that finds and deletes - all resources created by Rally. - - :param manager_cls: subclass of base.ResourceManager - :param admin: admin credential like in context["admin"] - :param users: users credentials like in context["users"] - :param api_versions: dict of client API versions - :param resource_classes: Resource classes to match resource names - against - :param task_id: The UUID of task to match resource names against - """ - self.manager_cls = manager_cls - self.admin = admin - self.users = users or [] - self.api_versions = api_versions - self.resource_classes = resource_classes or [ - rutils.RandomNameGeneratorMixin] - self.task_id = task_id - - def _get_cached_client(self, user): - """Simplifies initialization and caching OpenStack clients.""" - if not user: - return None - # NOTE(astudenov): Credential now supports caching by default - return user["credential"].clients(api_info=self.api_versions) - - def _delete_single_resource(self, resource): - """Safe resource deletion with retries and timeouts. - - Send request to delete resource, in case of failures repeat it few - times. After that pull status of resource until it's deleted. - - Writes in LOG warning with UUID of resource that wasn't deleted - - :param resource: instance of resource manager initiated with resource - that should be deleted. 
- """ - - msg_kw = { - "uuid": resource.id(), - "name": resource.name() or "", - "service": resource._service, - "resource": resource._resource - } - - LOG.debug( - "Deleting %(service)s.%(resource)s object %(name)s (%(uuid)s)" - % msg_kw) - - try: - rutils.retry(resource._max_attempts, resource.delete) - except Exception as e: - msg = ("Resource deletion failed, max retries exceeded for " - "%(service)s.%(resource)s: %(uuid)s.") % msg_kw - - if logging.is_debug(): - LOG.exception(msg) - else: - LOG.warning("%(msg)s Reason: %(e)s" % {"msg": msg, "e": e}) - else: - started = time.time() - failures_count = 0 - while time.time() - started < resource._timeout: - try: - if resource.is_deleted(): - return - except Exception as e: - LOG.exception( - "Seems like %s.%s.is_deleted(self) method is broken " - "It shouldn't raise any exceptions." - % (resource.__module__, type(resource).__name__)) - - # NOTE(boris-42): Avoid LOG spamming in case of bad - # is_deleted() method - failures_count += 1 - if failures_count > resource._max_attempts: - break - - finally: - rutils.interruptable_sleep(resource._interval) - - LOG.warning("Resource deletion failed, timeout occurred for " - "%(service)s.%(resource)s: %(uuid)s." % msg_kw) - - def _publisher(self, queue): - """Publisher for deletion jobs. - - This method iterates over all users, lists all resources - (using manager_cls) and puts jobs for deletion. - - Every deletion job contains tuple with two values: user and resource - uuid that should be deleted. - - In case of tenant based resource, uuids are fetched only from one user - per tenant. - """ - def _publish(admin, user, manager): - try: - for raw_resource in rutils.retry(3, manager.list): - queue.append((admin, user, raw_resource)) - except Exception: - LOG.exception( - "Seems like %s.%s.list(self) method is broken. " - "It shouldn't raise any exceptions." - % (manager.__module__, type(manager).__name__)) - - if self.admin and (not self.users - or self.manager_cls._perform_for_admin_only): - manager = self.manager_cls( - admin=self._get_cached_client(self.admin)) - _publish(self.admin, None, manager) - - else: - visited_tenants = set() - admin_client = self._get_cached_client(self.admin) - for user in self.users: - if (self.manager_cls._tenant_resource - and user["tenant_id"] in visited_tenants): - continue - - visited_tenants.add(user["tenant_id"]) - manager = self.manager_cls( - admin=admin_client, - user=self._get_cached_client(user), - tenant_uuid=user["tenant_id"]) - _publish(self.admin, user, manager) - - def _consumer(self, cache, args): - """Method that consumes single deletion job.""" - admin, user, raw_resource = args - - manager = self.manager_cls( - resource=raw_resource, - admin=self._get_cached_client(admin), - user=self._get_cached_client(user), - tenant_uuid=user and user["tenant_id"]) - - if (isinstance(manager.name(), base.NoName) or - rutils.name_matches_object( - manager.name(), *self.resource_classes, - task_id=self.task_id, exact=False)): - self._delete_single_resource(manager) - - def exterminate(self): - """Delete all resources for passed users, admin and resource_mgr.""" - - broker.run(self._publisher, self._consumer, - consumers_count=self.manager_cls._threads) - - -def list_resource_names(admin_required=None): - """List all resource managers names. - - Returns all service names and all combination of service.resource names. 
- - :param admin_required: None -> returns all ResourceManagers - True -> returns only admin ResourceManagers - False -> returns only non admin ResourceManagers - """ - res_mgrs = discover.itersubclasses(base.ResourceManager) - if admin_required is not None: - res_mgrs = filter(lambda cls: cls._admin_required == admin_required, - res_mgrs) - - names = set() - for cls in res_mgrs: - names.add(cls._service) - names.add("%s.%s" % (cls._service, cls._resource)) - - return names - - -def find_resource_managers(names=None, admin_required=None): - """Returns resource managers. - - :param names: List of names in format or . - that is used for filtering resource manager classes - :param admin_required: None -> returns all ResourceManagers - True -> returns only admin ResourceManagers - False -> returns only non admin ResourceManagers - """ - names = set(names or []) - - resource_managers = [] - for manager in discover.itersubclasses(base.ResourceManager): - if admin_required is not None: - if admin_required != manager._admin_required: - continue - - if (manager._service in names - or "%s.%s" % (manager._service, manager._resource) in names): - resource_managers.append(manager) - - resource_managers.sort(key=lambda x: x._order) - - found_names = set() - for mgr in resource_managers: - found_names.add(mgr._service) - found_names.add("%s.%s" % (mgr._service, mgr._resource)) - - missing = names - found_names - if missing: - LOG.warning("Missing resource managers: %s" % ", ".join(missing)) - - return resource_managers - - -def cleanup(names=None, admin_required=None, admin=None, users=None, - api_versions=None, superclass=plugin.Plugin, task_id=None): - """Generic cleaner. - - This method goes through all plugins. Filter those and left only plugins - with _service from services or _resource from resources. - - Then goes through all passed users and using cleaners cleans all related - resources. - - :param names: Use only resource managers that have names in this list. - There are in as _service or - (%s.%s % (_service, _resource)) from - :param admin_required: If None -> return all plugins - If True -> return only admin plugins - If False -> return only non admin plugins - :param admin: rally.deployment.credential.Credential that corresponds to - OpenStack admin. - :param users: List of OpenStack users that was used during testing. - Every user has next structure: - { - "id": , - "tenant_id": , - "credential": - } - :param superclass: The plugin superclass to perform cleanup - for. E.g., this could be - ``rally.task.scenario.Scenario`` to cleanup all - Scenario resources. - :param task_id: The UUID of task - """ - resource_classes = [cls for cls in discover.itersubclasses(superclass) - if issubclass(cls, rutils.RandomNameGeneratorMixin)] - if not resource_classes and issubclass(superclass, - rutils.RandomNameGeneratorMixin): - resource_classes.append(superclass) - for manager in find_resource_managers(names, admin_required): - LOG.debug("Cleaning up %(service)s %(resource)s objects" - % {"service": manager._service, - "resource": manager._resource}) - SeekAndDestroy(manager, admin, users, - api_versions=api_versions, - resource_classes=resource_classes, - task_id=task_id).exterminate() diff --git a/rally/plugins/openstack/cleanup/resources.py b/rally/plugins/openstack/cleanup/resources.py deleted file mode 100644 index 55c49b03cd..0000000000 --- a/rally/plugins/openstack/cleanup/resources.py +++ /dev/null @@ -1,970 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from boto import exception as boto_exception -from neutronclient.common import exceptions as neutron_exceptions -from novaclient import exceptions as nova_exc -from saharaclient.api import base as saharaclient_base - -from rally.common import cfg -from rally.common import logging -from rally.plugins.openstack.cleanup import base -from rally.plugins.openstack.services.identity import identity -from rally.plugins.openstack.services.image import glance_v2 -from rally.plugins.openstack.services.image import image -from rally.task import utils as task_utils - -CONF = cfg.CONF - -LOG = logging.getLogger(__name__) - - -def get_order(start): - return iter(range(start, start + 99)) - - -class SynchronizedDeletion(object): - - def is_deleted(self): - return True - - -class QuotaMixin(SynchronizedDeletion, base.ResourceManager): - # NOTE(andreykurilin): Quotas resources are quite complex in terms of - # cleanup. First of all, they do not have name, id fields at all. There - # is only one identifier - reference to Keystone Project/Tenant. Also, - # we should remove them in case of existing users case... To cover both - # cases we should use project name as name field (it will allow to pass - # existing users case) and project id as id of resource - - def list(self): - if not self.tenant_uuid: - return [] - client = self._admin_required and self.admin or self.user - project = identity.Identity(client).get_project(self.tenant_uuid) - return [project] - - -# MAGNUM - -_magnum_order = get_order(80) - - -@base.resource(service=None, resource=None) -class MagnumMixin(base.ResourceManager): - - def id(self): - """Returns id of resource.""" - return self.raw_resource.uuid - - def list(self): - result = [] - marker = None - while True: - resources = self._manager().list(marker=marker) - if not resources: - break - result.extend(resources) - marker = resources[-1].uuid - return result - - -@base.resource("magnum", "clusters", order=next(_magnum_order), - tenant_resource=True) -class MagnumCluster(MagnumMixin): - """Resource class for Magnum cluster.""" - - -@base.resource("magnum", "cluster_templates", order=next(_magnum_order), - tenant_resource=True) -class MagnumClusterTemplate(MagnumMixin): - """Resource class for Magnum cluster_template.""" - - -# HEAT - -@base.resource("heat", "stacks", order=100, tenant_resource=True) -class HeatStack(base.ResourceManager): - def name(self): - return self.raw_resource.stack_name - - -# SENLIN - -_senlin_order = get_order(150) - - -@base.resource(service=None, resource=None, admin_required=True) -class SenlinMixin(base.ResourceManager): - - def id(self): - return self.raw_resource["id"] - - def _manager(self): - client = self._admin_required and self.admin or self.user - return getattr(client, self._service)() - - def list(self): - return getattr(self._manager(), self._resource)() - - def delete(self): - # make singular form of resource name from plural form - res_name = self._resource[:-1] - return getattr(self._manager(), 
"delete_%s" % res_name)(self.id()) - - -@base.resource("senlin", "clusters", - admin_required=True, order=next(_senlin_order)) -class SenlinCluster(SenlinMixin): - """Resource class for Senlin Cluster.""" - - -@base.resource("senlin", "profiles", order=next(_senlin_order), - admin_required=False, tenant_resource=True) -class SenlinProfile(SenlinMixin): - """Resource class for Senlin Profile.""" - - -# NOVA - -_nova_order = get_order(200) - - -@base.resource("nova", "servers", order=next(_nova_order), - tenant_resource=True) -class NovaServer(base.ResourceManager): - def list(self): - """List all servers.""" - return self._manager().list(limit=-1) - - def delete(self): - if getattr(self.raw_resource, "OS-EXT-STS:locked", False): - self.raw_resource.unlock() - super(NovaServer, self).delete() - - -@base.resource("nova", "server_groups", order=next(_nova_order), - tenant_resource=True) -class NovaServerGroups(base.ResourceManager): - pass - - -@base.resource("nova", "keypairs", order=next(_nova_order)) -class NovaKeypair(SynchronizedDeletion, base.ResourceManager): - pass - - -@base.resource("nova", "quotas", order=next(_nova_order), - admin_required=True, tenant_resource=True) -class NovaQuotas(QuotaMixin): - pass - - -@base.resource("nova", "flavors", order=next(_nova_order), - admin_required=True, perform_for_admin_only=True) -class NovaFlavors(base.ResourceManager): - pass - - def is_deleted(self): - try: - self._manager().get(self.name()) - except nova_exc.NotFound: - return True - - return False - - -@base.resource("nova", "aggregates", order=next(_nova_order), - admin_required=True, perform_for_admin_only=True) -class NovaAggregate(SynchronizedDeletion, base.ResourceManager): - - def delete(self): - for host in self.raw_resource.hosts: - self.raw_resource.remove_host(host) - super(NovaAggregate, self).delete() - - -# EC2 - -_ec2_order = get_order(250) - - -class EC2Mixin(object): - - def _manager(self): - return getattr(self.user, self._service)() - - -@base.resource("ec2", "servers", order=next(_ec2_order)) -class EC2Server(EC2Mixin, base.ResourceManager): - - def is_deleted(self): - try: - instances = self._manager().get_only_instances( - instance_ids=[self.id()]) - except boto_exception.EC2ResponseError as e: - # NOTE(wtakase): Nova EC2 API returns 'InvalidInstanceID.NotFound' - # if instance not found. In this case, we consider - # instance has already been deleted. - return getattr(e, "error_code") == "InvalidInstanceID.NotFound" - - # NOTE(wtakase): After instance deletion, instance can be 'terminated' - # state. If all instance states are 'terminated', this - # returns True. And if get_only_instances() returns an - # empty list, this also returns True because we consider - # instance has already been deleted. 
- return all(map(lambda i: i.state == "terminated", instances)) - - def delete(self): - self._manager().terminate_instances(instance_ids=[self.id()]) - - def list(self): - return self._manager().get_only_instances() - - -# NEUTRON - -_neutron_order = get_order(300) - - -@base.resource(service=None, resource=None, admin_required=True) -class NeutronMixin(SynchronizedDeletion, base.ResourceManager): - # Neutron has the best client ever, so we need to override everything - - def supports_extension(self, extension): - exts = self._manager().list_extensions().get("extensions", []) - if any(ext.get("alias") == extension for ext in exts): - return True - return False - - def _manager(self): - client = self._admin_required and self.admin or self.user - return getattr(client, self._service)() - - def id(self): - return self.raw_resource["id"] - - def name(self): - return self.raw_resource["name"] - - def delete(self): - delete_method = getattr(self._manager(), "delete_%s" % self._resource) - delete_method(self.id()) - - def list(self): - if self._resource.endswith("y"): - resources = self._resource[:-1] + "ies" - else: - resources = self._resource + "s" - list_method = getattr(self._manager(), "list_%s" % resources) - result = list_method(tenant_id=self.tenant_uuid)[resources] - if self.tenant_uuid: - result = [r for r in result if r["tenant_id"] == self.tenant_uuid] - - return result - - -class NeutronLbaasV1Mixin(NeutronMixin): - - def list(self): - if self.supports_extension("lbaas"): - return super(NeutronLbaasV1Mixin, self).list() - return [] - - -@base.resource("neutron", "vip", order=next(_neutron_order), - tenant_resource=True) -class NeutronV1Vip(NeutronLbaasV1Mixin): - pass - - -@base.resource("neutron", "health_monitor", order=next(_neutron_order), - tenant_resource=True) -class NeutronV1Healthmonitor(NeutronLbaasV1Mixin): - pass - - -@base.resource("neutron", "pool", order=next(_neutron_order), - tenant_resource=True) -class NeutronV1Pool(NeutronLbaasV1Mixin): - pass - - -class NeutronLbaasV2Mixin(NeutronMixin): - - def list(self): - if self.supports_extension("lbaasv2"): - return super(NeutronLbaasV2Mixin, self).list() - return [] - - -@base.resource("neutron", "loadbalancer", order=next(_neutron_order), - tenant_resource=True) -class NeutronV2Loadbalancer(NeutronLbaasV2Mixin): - - def is_deleted(self): - try: - self._manager().show_loadbalancer(self.id()) - except Exception as e: - return getattr(e, "status_code", 400) == 404 - - return False - - -@base.resource("neutron", "bgpvpn", order=next(_neutron_order), - admin_required=True, perform_for_admin_only=True) -class NeutronBgpvpn(NeutronMixin): - def list(self): - if self.supports_extension("bgpvpn"): - return self._manager().list_bgpvpns()["bgpvpns"] - return [] - - -@base.resource("neutron", "floatingip", order=next(_neutron_order), - tenant_resource=True) -class NeutronFloatingIP(NeutronMixin): - def name(self): - return self.raw_resource.get("description", "") - - def list(self): - if CONF.openstack.pre_newton_neutron: - # NOTE(andreykurilin): Neutron API of pre-newton openstack - # releases does not support description field in Floating IPs. 
- # We do not want to remove not-rally resources, so let's just do - # nothing here and move pre-newton logic into separate plugins - return [] - return super(NeutronFloatingIP, self).list() - - -@base.resource("neutron", "port", order=next(_neutron_order), - tenant_resource=True) -class NeutronPort(NeutronMixin): - # NOTE(andreykurilin): port is the kind of resource that can be created - # automatically. In this case it doesn't have name field which matches - # our resource name templates. - ROUTER_INTERFACE_OWNERS = ("network:router_interface", - "network:router_interface_distributed", - "network:ha_router_replicated_interface") - - ROUTER_GATEWAY_OWNER = "network:router_gateway" - - def __init__(self, *args, **kwargs): - super(NeutronPort, self).__init__(*args, **kwargs) - self._cache = {} - - def _get_resources(self, resource): - if resource not in self._cache: - resources = getattr(self._manager(), "list_%s" % resource)() - self._cache[resource] = [r for r in resources[resource] - if r["tenant_id"] == self.tenant_uuid] - return self._cache[resource] - - def list(self): - ports = self._get_resources("ports") - for port in ports: - if not port.get("name"): - parent_name = None - if (port["device_owner"] in self.ROUTER_INTERFACE_OWNERS or - port["device_owner"] == self.ROUTER_GATEWAY_OWNER): - # first case is a port created while adding an interface to - # the subnet - # second case is a port created while adding gateway for - # the network - port_router = [r for r in self._get_resources("routers") - if r["id"] == port["device_id"]] - if port_router: - parent_name = port_router[0]["name"] - if parent_name: - port["parent_name"] = parent_name - return ports - - def name(self): - return self.raw_resource.get("parent_name", - self.raw_resource.get("name", "")) - - def delete(self): - device_owner = self.raw_resource["device_owner"] - if (device_owner in self.ROUTER_INTERFACE_OWNERS or - device_owner == self.ROUTER_GATEWAY_OWNER): - if device_owner == self.ROUTER_GATEWAY_OWNER: - self._manager().remove_gateway_router( - self.raw_resource["device_id"]) - - self._manager().remove_interface_router( - self.raw_resource["device_id"], {"port_id": self.id()}) - else: - try: - self._manager().delete_port(self.id()) - except neutron_exceptions.PortNotFoundClient: - # Port can be already auto-deleted, skip silently - LOG.debug("Port %s was not deleted. Skip silently because " - "port can be already auto-deleted." 
% self.id()) - - -@base.resource("neutron", "subnet", order=next(_neutron_order), - tenant_resource=True) -class NeutronSubnet(NeutronMixin): - pass - - -@base.resource("neutron", "network", order=next(_neutron_order), - tenant_resource=True) -class NeutronNetwork(NeutronMixin): - pass - - -@base.resource("neutron", "router", order=next(_neutron_order), - tenant_resource=True) -class NeutronRouter(NeutronMixin): - pass - - -@base.resource("neutron", "security_group", order=next(_neutron_order), - tenant_resource=True) -class NeutronSecurityGroup(NeutronMixin): - def list(self): - tenant_sgs = super(NeutronSecurityGroup, self).list() - # NOTE(pirsriva): Filter out "default" security group deletion - # by non-admin role user - return filter(lambda r: r["name"] != "default", - tenant_sgs) - - -@base.resource("neutron", "quota", order=next(_neutron_order), - admin_required=True, tenant_resource=True) -class NeutronQuota(QuotaMixin): - - def delete(self): - self.admin.neutron().delete_quota(self.tenant_uuid) - - -# CINDER - -_cinder_order = get_order(400) - - -@base.resource("cinder", "backups", order=next(_cinder_order), - tenant_resource=True) -class CinderVolumeBackup(base.ResourceManager): - pass - - -@base.resource("cinder", "volume_types", order=next(_cinder_order), - admin_required=True, perform_for_admin_only=True) -class CinderVolumeType(base.ResourceManager): - pass - - -@base.resource("cinder", "volume_snapshots", order=next(_cinder_order), - tenant_resource=True) -class CinderVolumeSnapshot(base.ResourceManager): - pass - - -@base.resource("cinder", "transfers", order=next(_cinder_order), - tenant_resource=True) -class CinderVolumeTransfer(base.ResourceManager): - pass - - -@base.resource("cinder", "volumes", order=next(_cinder_order), - tenant_resource=True) -class CinderVolume(base.ResourceManager): - pass - - -@base.resource("cinder", "image_volumes_cache", order=next(_cinder_order), - admin_required=True, perform_for_admin_only=True) -class CinderImageVolumeCache(base.ResourceManager): - - def _glance(self): - return image.Image(self.admin) - - def _manager(self): - return self.admin.cinder().volumes - - def list(self): - images = dict(("image-%s" % i.id, i) - for i in self._glance().list_images()) - return [{"volume": v, "image": images[v.name]} - for v in self._manager().list(search_opts={"all_tenants": 1}) - if v.name in images] - - def name(self): - return self.raw_resource["image"].name - - def id(self): - return self.raw_resource["volume"].id - - -@base.resource("cinder", "quotas", order=next(_cinder_order), - admin_required=True, tenant_resource=True) -class CinderQuotas(QuotaMixin, base.ResourceManager): - pass - - -@base.resource("cinder", "qos_specs", order=next(_cinder_order), - admin_required=True, perform_for_admin_only=True) -class CinderQos(base.ResourceManager): - pass - -# MANILA - -_manila_order = get_order(450) - - -@base.resource("manila", "shares", order=next(_manila_order), - tenant_resource=True) -class ManilaShare(base.ResourceManager): - pass - - -@base.resource("manila", "share_networks", order=next(_manila_order), - tenant_resource=True) -class ManilaShareNetwork(base.ResourceManager): - pass - - -@base.resource("manila", "security_services", order=next(_manila_order), - tenant_resource=True) -class ManilaSecurityService(base.ResourceManager): - pass - - -# GLANCE - -@base.resource("glance", "images", order=500, tenant_resource=True) -class GlanceImage(base.ResourceManager): - - def _client(self): - return image.Image(self.admin or self.user) - - def 
list(self): - images = (self._client().list_images(owner=self.tenant_uuid) + - self._client().list_images(status="deactivated", - owner=self.tenant_uuid)) - return images - - def delete(self): - client = self._client() - if self.raw_resource.status == "deactivated": - glancev2 = glance_v2.GlanceV2Service(self.admin or self.user) - glancev2.reactivate_image(self.raw_resource.id) - client.delete_image(self.raw_resource.id) - task_utils.wait_for_status( - self.raw_resource, ["deleted"], - check_deletion=True, - update_resource=self._client().get_image, - timeout=CONF.openstack.glance_image_delete_timeout, - check_interval=CONF.openstack.glance_image_delete_poll_interval) - - -# SAHARA - -_sahara_order = get_order(600) - - -@base.resource("sahara", "job_executions", order=next(_sahara_order), - tenant_resource=True) -class SaharaJobExecution(SynchronizedDeletion, base.ResourceManager): - pass - - -@base.resource("sahara", "jobs", order=next(_sahara_order), - tenant_resource=True) -class SaharaJob(SynchronizedDeletion, base.ResourceManager): - pass - - -@base.resource("sahara", "job_binary_internals", order=next(_sahara_order), - tenant_resource=True) -class SaharaJobBinaryInternals(SynchronizedDeletion, base.ResourceManager): - pass - - -@base.resource("sahara", "job_binaries", order=next(_sahara_order), - tenant_resource=True) -class SaharaJobBinary(SynchronizedDeletion, base.ResourceManager): - pass - - -@base.resource("sahara", "data_sources", order=next(_sahara_order), - tenant_resource=True) -class SaharaDataSource(SynchronizedDeletion, base.ResourceManager): - pass - - -@base.resource("sahara", "clusters", order=next(_sahara_order), - tenant_resource=True) -class SaharaCluster(base.ResourceManager): - - # Need special treatment for Sahara Cluster because of the way the - # exceptions are described in: - # https://github.com/openstack/python-saharaclient/blob/master/ - # saharaclient/api/base.py#L145 - - def is_deleted(self): - try: - self._manager().get(self.id()) - return False - except saharaclient_base.APIException as e: - return e.error_code == 404 - - -@base.resource("sahara", "cluster_templates", order=next(_sahara_order), - tenant_resource=True) -class SaharaClusterTemplate(SynchronizedDeletion, base.ResourceManager): - pass - - -@base.resource("sahara", "node_group_templates", order=next(_sahara_order), - tenant_resource=True) -class SaharaNodeGroup(SynchronizedDeletion, base.ResourceManager): - pass - - -# CEILOMETER - -@base.resource("ceilometer", "alarms", order=700, tenant_resource=True) -class CeilometerAlarms(SynchronizedDeletion, base.ResourceManager): - - def id(self): - return self.raw_resource.alarm_id - - def list(self): - query = [{ - "field": "project_id", - "op": "eq", - "value": self.tenant_uuid - }] - return self._manager().list(q=query) - - -# ZAQAR - -@base.resource("zaqar", "queues", order=800) -class ZaqarQueues(SynchronizedDeletion, base.ResourceManager): - - def list(self): - return self.user.zaqar().queues() - - -# DESIGNATE -_designate_order = get_order(900) - - -class DesignateResource(SynchronizedDeletion, base.ResourceManager): - - # TODO(boris-42): This should be handled somewhere else. 
- NAME_PREFIX = "s_rally_" - - def _manager(self, resource=None): - # Map resource names to api / client version - resource = resource or self._resource - version = { - "domains": "1", - "servers": "1", - "records": "1", - "recordsets": "2", - "zones": "2" - }[resource] - - client = self._admin_required and self.admin or self.user - return getattr(getattr(client, self._service)(version), resource) - - def id(self): - """Returns id of resource.""" - return self.raw_resource["id"] - - def name(self): - """Returns name of resource.""" - return self.raw_resource["name"] - - def list(self): - return [item for item in self._manager().list() - if item["name"].startswith(self.NAME_PREFIX)] - - -@base.resource("designate", "domains", order=next(_designate_order), - tenant_resource=True, threads=1) -class DesignateDomain(DesignateResource): - pass - - -@base.resource("designate", "servers", order=next(_designate_order), - admin_required=True, perform_for_admin_only=True, threads=1) -class DesignateServer(DesignateResource): - pass - - -@base.resource("designate", "zones", order=next(_designate_order), - tenant_resource=True, threads=1) -class DesignateZones(DesignateResource): - - def list(self): - marker = None - criterion = {"name": "%s*" % self.NAME_PREFIX} - - while True: - items = self._manager().list(marker=marker, limit=100, - criterion=criterion) - if not items: - break - for item in items: - yield item - marker = items[-1]["id"] - - -# SWIFT - -_swift_order = get_order(1000) - - -class SwiftMixin(SynchronizedDeletion, base.ResourceManager): - - def _manager(self): - client = self._admin_required and self.admin or self.user - return getattr(client, self._service)() - - def id(self): - return self.raw_resource - - def name(self): - # NOTE(stpierre): raw_resource is a list of either [container - # name, object name] (as in SwiftObject) or just [container - # name] (as in SwiftContainer). - return self.raw_resource[-1] - - def delete(self): - delete_method = getattr(self._manager(), "delete_%s" % self._resource) - # NOTE(weiwu): *self.raw_resource is required because for deleting - # container we are passing only container name, to delete object we - # should pass as first argument container and second is object name. 
- delete_method(*self.raw_resource) - - -@base.resource("swift", "object", order=next(_swift_order), - tenant_resource=True) -class SwiftObject(SwiftMixin): - - def list(self): - object_list = [] - containers = self._manager().get_account(full_listing=True)[1] - for con in containers: - objects = self._manager().get_container(con["name"], - full_listing=True)[1] - for obj in objects: - raw_resource = [con["name"], obj["name"]] - object_list.append(raw_resource) - return object_list - - -@base.resource("swift", "container", order=next(_swift_order), - tenant_resource=True) -class SwiftContainer(SwiftMixin): - - def list(self): - containers = self._manager().get_account(full_listing=True)[1] - return [[con["name"]] for con in containers] - - -# MISTRAL - -_mistral_order = get_order(1100) - - -@base.resource("mistral", "workbooks", order=next(_mistral_order), - tenant_resource=True) -class MistralWorkbooks(SynchronizedDeletion, base.ResourceManager): - def delete(self): - self._manager().delete(self.raw_resource.name) - - -@base.resource("mistral", "workflows", order=next(_mistral_order), - tenant_resource=True) -class MistralWorkflows(SynchronizedDeletion, base.ResourceManager): - pass - - -@base.resource("mistral", "executions", order=next(_mistral_order), - tenant_resource=True) -class MistralExecutions(SynchronizedDeletion, base.ResourceManager): - - def name(self): - # NOTE(andreykurilin): Mistral Execution doesn't have own name which - # we can use for filtering, but it stores workflow id and name, even - # after workflow deletion. - return self.raw_resource.workflow_name - -# MURANO - -_murano_order = get_order(1200) - - -@base.resource("murano", "environments", tenant_resource=True, - order=next(_murano_order)) -class MuranoEnvironments(SynchronizedDeletion, base.ResourceManager): - pass - - -@base.resource("murano", "packages", tenant_resource=True, - order=next(_murano_order)) -class MuranoPackages(base.ResourceManager): - def list(self): - return filter(lambda x: x.name != "Core library", - super(MuranoPackages, self).list()) - - -# IRONIC - -_ironic_order = get_order(1300) - - -@base.resource("ironic", "node", admin_required=True, - order=next(_ironic_order), perform_for_admin_only=True) -class IronicNodes(base.ResourceManager): - - def id(self): - return self.raw_resource.uuid - - -# GNOCCHI - -_gnocchi_order = get_order(1400) - - -@base.resource("gnocchi", "archive_policy_rule", order=next(_gnocchi_order), - admin_required=True, perform_for_admin_only=True) -class GnocchiArchivePolicyRule(base.ResourceManager): - - def name(self): - return self.raw_resource["name"] - - def id(self): - return self.raw_resource["name"] - - -# WATCHER - -_watcher_order = get_order(1500) - - -class WatcherMixin(SynchronizedDeletion, base.ResourceManager): - - def id(self): - return self.raw_resource.uuid - - def list(self): - return self._manager().list(limit=0) - - def is_deleted(self): - from watcherclient.common.apiclient import exceptions - try: - self._manager().get(self.id()) - return False - except exceptions.NotFound: - return True - - -@base.resource("watcher", "audit_template", order=next(_watcher_order), - admin_required=True, perform_for_admin_only=True) -class WatcherTemplate(WatcherMixin): - pass - - -@base.resource("watcher", "action_plan", order=next(_watcher_order), - admin_required=True, perform_for_admin_only=True) -class WatcherActionPlan(WatcherMixin): - - def name(self): - return base.NoName(self._resource) - - -@base.resource("watcher", "audit", order=next(_watcher_order), - 
admin_required=True, perform_for_admin_only=True) -class WatcherAudit(WatcherMixin): - - def name(self): - return self.raw_resource.uuid - - -# KEYSTONE - -_keystone_order = get_order(9000) - - -class KeystoneMixin(SynchronizedDeletion): - - def _manager(self): - return identity.Identity(self.admin) - - def delete(self): - delete_method = getattr(self._manager(), "delete_%s" % self._resource) - delete_method(self.id()) - - def list(self): - resources = self._resource + "s" - return getattr(self._manager(), "list_%s" % resources)() - - -@base.resource("keystone", "user", order=next(_keystone_order), - admin_required=True, perform_for_admin_only=True) -class KeystoneUser(KeystoneMixin, base.ResourceManager): - pass - - -@base.resource("keystone", "project", order=next(_keystone_order), - admin_required=True, perform_for_admin_only=True) -class KeystoneProject(KeystoneMixin, base.ResourceManager): - pass - - -@base.resource("keystone", "service", order=next(_keystone_order), - admin_required=True, perform_for_admin_only=True) -class KeystoneService(KeystoneMixin, base.ResourceManager): - pass - - -@base.resource("keystone", "role", order=next(_keystone_order), - admin_required=True, perform_for_admin_only=True) -class KeystoneRole(KeystoneMixin, base.ResourceManager): - pass - - -# NOTE(andreykurilin): unfortunately, ec2 credentials doesn't have name -# and id fields. It makes impossible to identify resources belonging to -# particular task. -@base.resource("keystone", "ec2", tenant_resource=True, - order=next(_keystone_order)) -class KeystoneEc2(SynchronizedDeletion, base.ResourceManager): - def _manager(self): - return identity.Identity(self.user) - - def id(self): - return "n/a" - - def name(self): - return base.NoName(self._resource) - - @property - def user_id(self): - return self.user.keystone.auth_ref.user_id - - def list(self): - return self._manager().list_ec2credentials(self.user_id) - - def delete(self): - self._manager().delete_ec2credential( - self.user_id, access=self.raw_resource.access) diff --git a/rally/plugins/openstack/context/__init__.py b/rally/plugins/openstack/context/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/context/api_versions.py b/rally/plugins/openstack/context/api_versions.py deleted file mode 100644 index 2152666239..0000000000 --- a/rally/plugins/openstack/context/api_versions.py +++ /dev/null @@ -1,268 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import random - -from rally.common import validation -from rally import consts -from rally import exceptions -from rally.plugins.openstack import osclients -from rally.task import context - - -@validation.configure("check_api_versions") -class CheckOpenStackAPIVersionsValidator(validation.Validator): - """Additional validation for api_versions context""" - - def validate(self, context, config, plugin_cls, plugin_cfg): - for client in plugin_cfg: - client_cls = osclients.OSClient.get(client) - try: - if ("service_type" in plugin_cfg[client] or - "service_name" in plugin_cfg[client]): - client_cls.is_service_type_configurable() - - if "version" in plugin_cfg[client]: - client_cls.validate_version(plugin_cfg[client]["version"]) - - except exceptions.RallyException as e: - return self.fail( - "Invalid settings for '%(client)s': %(error)s" % { - "client": client, - "error": e.format_message()}) - - -@validation.add("check_api_versions") -@context.configure(name="api_versions", platform="openstack", order=150) -class OpenStackAPIVersions(context.Context): - """Context for specifying OpenStack clients versions and service types. - - Some OpenStack services support several API versions. To recognize - the endpoints of each version, separate service types are provided in - Keystone service catalog. - - Rally has the map of default service names - service types. But since - service type is an entity, which can be configured manually by admin( - via keystone api) without relation to service name, such map can be - insufficient. - - Also, Keystone service catalog does not provide a map types to name - (this statement is true for keystone < 3.3 ). - - This context was designed for not-default service types and not-default - API versions usage. - - An example of specifying API version: - - .. code-block:: json - - # In this example we will launch NovaKeypair.create_and_list_keypairs - # scenario on 2.2 api version. - { - "NovaKeypair.create_and_list_keypairs": [ - { - "args": { - "key_type": "x509" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - }, - "api_versions": { - "nova": { - "version": 2.2 - } - } - } - } - ] - } - - An example of specifying API version along with service type: - - .. code-block:: json - - # In this example we will launch CinderVolumes.create_and_attach_volume - # scenario on Cinder V2 - { - "CinderVolumes.create_and_attach_volume": [ - { - "args": { - "size": 10, - "image": { - "name": "^cirros.*-disk$" - }, - "flavor": { - "name": "m1.tiny" - }, - "create_volume_params": { - "availability_zone": "nova" - } - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "api_versions": { - "cinder": { - "version": 2, - "service_type": "volumev2" - } - } - } - } - ] - } - - Also, it possible to use service name as an identifier of service endpoint, - but an admin user is required (Keystone can return map of service - names - types, but such API is permitted only for admin). An example: - - .. 
code-block:: json - - # Similar to the previous example, but `service_name` argument is used - # instead of `service_type` - { - "CinderVolumes.create_and_attach_volume": [ - { - "args": { - "size": 10, - "image": { - "name": "^cirros.*-disk$" - }, - "flavor": { - "name": "m1.tiny" - }, - "create_volume_params": { - "availability_zone": "nova" - } - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "api_versions": { - "cinder": { - "version": 2, - "service_name": "cinderv2" - } - } - } - } - ] - } - - """ - VERSION_SCHEMA = { - "anyOf": [ - {"type": "string", "description": "a string-like version."}, - {"type": "number", "description": "a number-like version."} - ] - } - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "patternProperties": { - "^[a-z]+$": { - "type": "object", - "oneOf": [ - { - "description": "version only", - "properties": { - "version": VERSION_SCHEMA, - }, - "required": ["version"], - "additionalProperties": False - }, - { - "description": "version and service_name", - "properties": { - "version": VERSION_SCHEMA, - "service_name": {"type": "string"} - }, - "required": ["service_name"], - "additionalProperties": False - }, - { - "description": "version and service_type", - "properties": { - "version": VERSION_SCHEMA, - "service_type": {"type": "string"} - }, - "required": ["service_type"], - "additionalProperties": False - } - ], - } - }, - "minProperties": 1, - "additionalProperties": False - } - - def setup(self): - # FIXME(andreykurilin): move all checks to validate method. - - # use admin only when `service_name` is presented - admin_clients = osclients.Clients( - self.context.get("admin", {}).get("credential")) - clients = osclients.Clients(random.choice( - self.context["users"])["credential"]) - services = clients.keystone.service_catalog.get_endpoints() - services_from_admin = None - for client_name, conf in self.config.items(): - if "service_type" in conf and conf["service_type"] not in services: - raise exceptions.ValidationError( - "There is no service with '%s' type in your environment." 
- % conf["service_type"]) - elif "service_name" in conf: - if not self.context.get("admin", {}).get("credential"): - raise exceptions.ContextSetupFailure( - ctx_name=self.get_name(), - msg="Setting 'service_name' is admin only operation.") - if not services_from_admin: - services_from_admin = dict( - [(s.name, s.type) - for s in admin_clients.keystone().services.list()]) - if conf["service_name"] not in services_from_admin: - raise exceptions.ValidationError( - "There is no '%s' service in your environment" - % conf["service_name"]) - - # TODO(boris-42): Use separate key ["openstack"]["versions"] - self.context["config"]["api_versions@openstack"][client_name][ - "service_type"] = services_from_admin[conf["service_name"]] - - # NOTE(boris-42): Required to be backward compatible - self.context["config"]["api_versions"] = ( - self.context["config"]["api_versions@openstack"]) - - def cleanup(self): - # nothing to do here - pass diff --git a/rally/plugins/openstack/context/ceilometer/__init__.py b/rally/plugins/openstack/context/ceilometer/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/context/ceilometer/samples.py b/rally/plugins/openstack/context/ceilometer/samples.py deleted file mode 100644 index 0c9f26afd1..0000000000 --- a/rally/plugins/openstack/context/ceilometer/samples.py +++ /dev/null @@ -1,179 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import time - -from six import moves - -from rally.common import logging -from rally.common import utils as rutils -from rally.common import validation -from rally import consts -from rally import exceptions -from rally.plugins.openstack.scenarios.ceilometer import utils as ceilo_utils -from rally.task import context - - -LOG = logging.getLogger(__name__) - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="ceilometer", platform="openstack", order=450) -class CeilometerSampleGenerator(context.Context): - """Creates ceilometer samples and resources.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "counter_name": { - "type": "string" - }, - "counter_type": { - "type": "string" - }, - "counter_unit": { - "type": "string" - }, - "counter_volume": { - "type": "number", - "minimum": 0 - }, - "resources_per_tenant": { - "type": "integer", - "minimum": 1 - }, - "samples_per_resource": { - "type": "integer", - "minimum": 1 - }, - "timestamp_interval": { - "type": "integer", - "minimum": 1 - }, - "metadata_list": { - "type": "array", - "items": { - "type": "object", - "properties": { - "status": { - "type": "string" - }, - "name": { - "type": "string" - }, - "deleted": { - "type": "string" - }, - "created_at": { - "type": "string" - } - }, - "additionalProperties": False - } - }, - "batch_size": { - "type": "integer", - "minimum": 1 - }, - "batches_allow_lose": { - "type": "integer", - "minimum": 0 - } - }, - "required": ["counter_name", "counter_type", "counter_unit", - "counter_volume"], - "additionalProperties": False - } - - DEFAULT_CONFIG = { - "resources_per_tenant": 5, - "samples_per_resource": 5, - "timestamp_interval": 60 - } - - def _store_batch_samples(self, scenario, batches, batches_allow_lose): - batches_allow_lose = batches_allow_lose or 0 - unsuccess = 0 - for i, batch in enumerate(batches, start=1): - try: - samples = scenario._create_samples(batch) - except Exception: - unsuccess += 1 - LOG.warning("Failed to store batch %d of Ceilometer samples" - " during context creation" % i) - if unsuccess > batches_allow_lose: - raise exceptions.ContextSetupFailure( - ctx_name=self.get_name(), - msg="Context failed to store too many batches of samples") - - return samples - - def setup(self): - new_sample = { - "counter_name": self.config["counter_name"], - "counter_type": self.config["counter_type"], - "counter_unit": self.config["counter_unit"], - "counter_volume": self.config["counter_volume"], - } - resources = [] - - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - self.context["tenants"][tenant_id]["samples"] = [] - self.context["tenants"][tenant_id]["resources"] = [] - scenario = ceilo_utils.CeilometerScenario( - context={"user": user, "task": self.context["task"]} - ) - for i in moves.xrange(self.config["resources_per_tenant"]): - samples_to_create = scenario._make_samples( - count=self.config["samples_per_resource"], - interval=self.config["timestamp_interval"], - metadata_list=self.config.get("metadata_list"), - batch_size=self.config.get("batch_size"), - **new_sample) - samples = self._store_batch_samples( - scenario, samples_to_create, - self.config.get("batches_allow_lose") - ) - for sample in samples: - self.context["tenants"][tenant_id]["samples"].append( - sample.to_dict()) - self.context["tenants"][tenant_id]["resources"].append( - samples[0].resource_id) - resources.append((user, samples[0].resource_id)) - - # NOTE(boris-42): Context should wait 
until samples are processed - from ceilometerclient import exc - - for user, resource_id in resources: - scenario = ceilo_utils.CeilometerScenario( - context={"user": user, "task": self.context["task"]}) - - success = False - for i in range(60): - try: - scenario._get_resource(resource_id) - success = True - break - except exc.HTTPNotFound: - time.sleep(3) - if not success: - raise exceptions.ContextSetupFailure( - ctx_name=self.get_name(), - msg="Ceilometer Resource %s is not found" % resource_id) - - def cleanup(self): - # We don't have API for removal of samples and resources - pass diff --git a/rally/plugins/openstack/context/cinder/__init__.py b/rally/plugins/openstack/context/cinder/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/context/cinder/volume_types.py b/rally/plugins/openstack/context/cinder/volume_types.py deleted file mode 100644 index 1f51538ef6..0000000000 --- a/rally/plugins/openstack/context/cinder/volume_types.py +++ /dev/null @@ -1,61 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import logging -from rally.common import utils -from rally.common import validation -from rally import consts -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack import osclients -from rally.plugins.openstack.services.storage import block -from rally.task import context - - -LOG = logging.getLogger(__name__) - - -@validation.add("required_platform", platform="openstack", admin=True) -@context.configure(name="volume_types", platform="openstack", order=410) -class VolumeTypeGenerator(context.Context): - """Adds cinder volumes types.""" - - CONFIG_SCHEMA = { - "type": "array", - "$schema": consts.JSON_SCHEMA, - "items": {"type": "string"} - } - - def setup(self): - admin_clients = osclients.Clients( - self.context.get("admin", {}).get("credential"), - api_info=self.context["config"].get("api_versions")) - cinder_service = block.BlockStorage( - admin_clients, - name_generator=self.generate_random_name, - atomic_inst=self.atomic_actions()) - self.context["volume_types"] = [] - for vtype_name in self.config: - LOG.debug("Creating Cinder volume type %s" % vtype_name) - vtype = cinder_service.create_volume_type(vtype_name) - self.context["volume_types"].append({"id": vtype.id, - "name": vtype_name}) - - def cleanup(self): - mather = utils.make_name_matcher(*self.config) - resource_manager.cleanup( - names=["cinder.volume_types"], - admin=self.context["admin"], - api_versions=self.context["config"].get("api_versions"), - superclass=mather, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/cinder/volumes.py b/rally/plugins/openstack/context/cinder/volumes.py deleted file mode 100644 index 44295fd06a..0000000000 --- a/rally/plugins/openstack/context/cinder/volumes.py +++ /dev/null @@ -1,83 +0,0 @@ -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import utils as rutils -from rally import consts -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack import osclients -from rally.plugins.openstack.services.storage import block -from rally.task import context - - -@context.configure(name="volumes", platform="openstack", order=420) -class VolumeGenerator(context.Context): - """Creates volumes for each tenant.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "size": { - "type": "integer", - "minimum": 1 - }, - "type": { - "oneOf": [{"type": "string", - "description": "a string-like type of volume to " - "create."}, - {"type": "null", - "description": "Use default type for volume to " - "create."}] - }, - "volumes_per_tenant": { - "type": "integer", - "minimum": 1 - } - }, - "required": ["size"], - "additionalProperties": False - } - - DEFAULT_CONFIG = { - "volumes_per_tenant": 1 - } - - def setup(self): - size = self.config["size"] - volume_type = self.config.get("type", None) - volumes_per_tenant = self.config["volumes_per_tenant"] - - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - self.context["tenants"][tenant_id].setdefault("volumes", []) - clients = osclients.Clients( - user["credential"], - api_info=self.context["config"].get("api_versions")) - cinder_service = block.BlockStorage( - clients, - name_generator=self.generate_random_name, - atomic_inst=self.atomic_actions()) - for i in range(volumes_per_tenant): - vol = cinder_service.create_volume(size, - volume_type=volume_type) - self.context["tenants"][tenant_id]["volumes"].append( - vol._as_dict()) - - def cleanup(self): - resource_manager.cleanup( - names=["cinder.volumes"], - users=self.context.get("users", []), - api_versions=self.context["config"].get("api_versions"), - superclass=self.__class__, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/cleanup/__init__.py b/rally/plugins/openstack/context/cleanup/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/context/cleanup/admin.py b/rally/plugins/openstack/context/cleanup/admin.py deleted file mode 100644 index bdd2414b54..0000000000 --- a/rally/plugins/openstack/context/cleanup/admin.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import sys - -from rally.common import validation -from rally.plugins.openstack.cleanup import manager -from rally.plugins.openstack.context.cleanup import base -from rally.plugins.openstack import scenario -from rally.task import context - - -@validation.add(name="check_cleanup_resources", admin_required=True) -# NOTE(amaretskiy): Set order to run this just before UserCleanup -@context.configure(name="admin_cleanup", platform="openstack", - order=(sys.maxsize - 1), hidden=True) -class AdminCleanup(base.CleanupMixin, context.Context): - """Context class for admin resources cleanup.""" - - def cleanup(self): - manager.cleanup( - names=self.config, - admin_required=True, - admin=self.context["admin"], - users=self.context.get("users", []), - api_versions=self.context["config"].get("api_versions"), - superclass=scenario.OpenStackScenario, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/cleanup/base.py b/rally/plugins/openstack/context/cleanup/base.py deleted file mode 100644 index 39232694b2..0000000000 --- a/rally/plugins/openstack/context/cleanup/base.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import validation -from rally import consts -from rally.plugins.openstack.cleanup import manager - - -@validation.configure("check_cleanup_resources") -class CheckCleanupResourcesValidator(validation.Validator): - - def __init__(self, admin_required): - """Validates that openstack resource managers exist - - :param admin_required: describes access level to resource - """ - super(CheckCleanupResourcesValidator, self).__init__() - self.admin_required = admin_required - - def validate(self, context, config, plugin_cls, plugin_cfg): - missing = set(plugin_cfg) - missing -= manager.list_resource_names( - admin_required=self.admin_required) - missing = ", ".join(missing) - if missing: - return self.fail( - "Couldn't find cleanup resource managers: %s" % missing) - - -class CleanupMixin(object): - - CONFIG_SCHEMA = { - "type": "array", - "$schema": consts.JSON_SCHEMA, - "items": { - "type": "string", - } - } - - def setup(self): - pass diff --git a/rally/plugins/openstack/context/cleanup/user.py b/rally/plugins/openstack/context/cleanup/user.py deleted file mode 100644 index 86acedb0b3..0000000000 --- a/rally/plugins/openstack/context/cleanup/user.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import sys - -from rally.common import validation -from rally.plugins.openstack.cleanup import manager -from rally.plugins.openstack.context.cleanup import base -from rally.plugins.openstack import scenario -from rally.task import context - - -@validation.add(name="check_cleanup_resources", admin_required=False) -# NOTE(amaretskiy): Set maximum order to run this last -@context.configure(name="cleanup", platform="openstack", order=sys.maxsize, - hidden=True) -class UserCleanup(base.CleanupMixin, context.Context): - """Context class for user resources cleanup.""" - - def cleanup(self): - manager.cleanup( - names=self.config, - admin_required=False, - users=self.context.get("users", []), - api_versions=self.context["config"].get("api_versions"), - superclass=scenario.OpenStackScenario, - task_id=self.get_owner_id() - ) diff --git a/rally/plugins/openstack/context/dataplane/__init__.py b/rally/plugins/openstack/context/dataplane/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/context/dataplane/heat.py b/rally/plugins/openstack/context/dataplane/heat.py deleted file mode 100644 index b6c7781b64..0000000000 --- a/rally/plugins/openstack/context/dataplane/heat.py +++ /dev/null @@ -1,154 +0,0 @@ -# Copyright 2016: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import pkgutil - -from rally.common import utils as rutils -from rally.common import validation -from rally import consts -from rally import exceptions -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack import osclients -from rally.plugins.openstack.scenarios.heat import utils as heat_utils -from rally.task import context - - -def get_data(filename_or_resource): - if isinstance(filename_or_resource, list): - return pkgutil.get_data(*filename_or_resource) - return open(filename_or_resource).read() - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="heat_dataplane", platform="openstack", order=435) -class HeatDataplane(context.Context): - """Context class for create stack by given template. - - This context will create stacks by given template for each tenant and - add details to context. Following details will be added: - - * id of stack; - * template file contents; - * files dictionary; - * stack parameters; - - Heat template should define a "gate" node which will interact with Rally - by ssh and workload nodes by any protocol. 
To make this possible heat - template should accept the following parameters: - - * network_id: id of public network - * router_id: id of external router to connect "gate" node - * key_name: name of nova ssh keypair to use for "gate" node - """ - FILE_SCHEMA = { - "description": "", - "type": "string", - } - RESOURCE_SCHEMA = { - "description": "", - "type": "array", - "minItems": 2, - "maxItems": 2, - "items": {"type": "string"} - } - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "stacks_per_tenant": { - "type": "integer", - "minimum": 1 - }, - "template": { - "oneOf": [FILE_SCHEMA, RESOURCE_SCHEMA], - }, - "files": { - "type": "object", - "additionalProperties": True - }, - "parameters": { - "type": "object", - "additionalProperties": True - }, - "context_parameters": { - "type": "object", - "additionalProperties": True - }, - }, - "additionalProperties": False - } - - DEFAULT_CONFIG = { - "stacks_per_tenant": 1, - } - - def _get_context_parameter(self, user, tenant_id, path): - value = {"user": user, "tenant": self.context["tenants"][tenant_id]} - for key in path.split("."): - try: - # try to cast string to int in order to support integer keys - # e.g 'spam.1.eggs' will be translated to ["spam"][1]["eggs"] - key = int(key) - except ValueError: - pass - try: - value = value[key] - except KeyError: - raise exceptions.RallyException( - "There is no key %s in context" % path) - return value - - def _get_public_network_id(self): - nc = osclients.Clients(self.context["admin"]["credential"]).neutron() - networks = nc.list_networks(**{"router:external": True})["networks"] - return networks[0]["id"] - - def setup(self): - template = get_data(self.config["template"]) - files = {} - for key, filename in self.config.get("files", {}).items(): - files[key] = get_data(filename) - parameters = self.config.get("parameters", rutils.LockedDict()) - with parameters.unlocked(): - if "network_id" not in parameters: - parameters["network_id"] = self._get_public_network_id() - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - for name, path in self.config.get("context_parameters", - {}).items(): - parameters[name] = self._get_context_parameter(user, - tenant_id, - path) - if "router_id" not in parameters: - networks = self.context["tenants"][tenant_id]["networks"] - parameters["router_id"] = networks[0]["router_id"] - if "key_name" not in parameters: - parameters["key_name"] = user["keypair"]["name"] - heat_scenario = heat_utils.HeatScenario( - {"user": user, "task": self.context["task"], - "owner_id": self.context["owner_id"]}) - self.context["tenants"][tenant_id]["stack_dataplane"] = [] - for i in range(self.config["stacks_per_tenant"]): - stack = heat_scenario._create_stack(template, files=files, - parameters=parameters) - tenant_data = self.context["tenants"][tenant_id] - tenant_data["stack_dataplane"].append([stack.id, template, - files, parameters]) - - def cleanup(self): - resource_manager.cleanup(names=["heat.stacks"], - users=self.context.get("users", []), - superclass=heat_utils.HeatScenario, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/designate/__init__.py b/rally/plugins/openstack/context/designate/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/context/designate/zones.py b/rally/plugins/openstack/context/designate/zones.py deleted file mode 100644 index 1d402880f6..0000000000 --- a/rally/plugins/openstack/context/designate/zones.py +++ 
/dev/null @@ -1,60 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import utils as rutils -from rally.common import validation -from rally import consts -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack.scenarios.designate import utils -from rally.task import context - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="zones", platform="openstack", order=600) -class ZoneGenerator(context.Context): - """Context to add `zones_per_tenant` zones for each tenant.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "zones_per_tenant": { - "type": "integer", - "minimum": 1 - }, - }, - "additionalProperties": False - } - - DEFAULT_CONFIG = { - "zones_per_tenant": 1 - } - - def setup(self): - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - self.context["tenants"][tenant_id].setdefault("zones", []) - designate_util = utils.DesignateScenario( - {"user": user, - "task": self.context["task"], - "owner_id": self.context["owner_id"]}) - for i in range(self.config["zones_per_tenant"]): - zone = designate_util._create_zone() - self.context["tenants"][tenant_id]["zones"].append(zone) - - def cleanup(self): - resource_manager.cleanup(names=["designate.zones"], - users=self.context.get("users", []), - superclass=utils.DesignateScenario, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/ec2/__init__.py b/rally/plugins/openstack/context/ec2/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/context/ec2/servers.py b/rally/plugins/openstack/context/ec2/servers.py deleted file mode 100644 index 7f7a6eeed1..0000000000 --- a/rally/plugins/openstack/context/ec2/servers.py +++ /dev/null @@ -1,96 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from rally.common import logging -from rally.common import utils as rutils -from rally import consts -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack.scenarios.ec2 import utils as ec2_utils -from rally.plugins.openstack import types -from rally.task import context - - -LOG = logging.getLogger(__name__) - - -@context.configure(name="ec2_servers", platform="openstack", order=460) -class EC2ServerGenerator(context.Context): - """Creates specified amount of nova servers in each tenant uses ec2 API.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "image": { - "type": "object", - "properties": { - "name": { - "type": "string" - } - }, - "additionalProperties": False - }, - "flavor": { - "type": "object", - "properties": { - "name": { - "type": "string" - } - }, - "additionalProperties": False - }, - "servers_per_tenant": { - "type": "integer", - "minimum": 1 - } - }, - "required": ["image", "flavor", "servers_per_tenant"], - "additionalProperties": False - } - - def setup(self): - image = self.config["image"] - flavor = self.config["flavor"] - - image_id = types.EC2Image(self.context).pre_process( - resource_spec=image, config={}) - - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - LOG.debug("Booting servers for tenant %s " % user["tenant_id"]) - ec2_scenario = ec2_utils.EC2Scenario({ - "user": user, - "task": self.context["task"], - "owner_id": self.context["owner_id"]}) - - LOG.debug( - "Calling _boot_servers with " - "image_id=%(image_id)s flavor_name=%(flavor_name)s " - "servers_per_tenant=%(servers_per_tenant)s" - % {"image_id": image_id, - "flavor_name": flavor["name"], - "servers_per_tenant": self.config["servers_per_tenant"]}) - - servers = ec2_scenario._boot_servers( - image_id, flavor["name"], self.config["servers_per_tenant"]) - - current_servers = [server.id for server in servers] - - self.context["tenants"][tenant_id]["ec2_servers"] = current_servers - - def cleanup(self): - resource_manager.cleanup(names=["ec2.servers"], - users=self.context.get("users", []), - superclass=ec2_utils.EC2Scenario, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/glance/__init__.py b/rally/plugins/openstack/context/glance/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/context/glance/images.py b/rally/plugins/openstack/context/glance/images.py deleted file mode 100644 index 95e26f1449..0000000000 --- a/rally/plugins/openstack/context/glance/images.py +++ /dev/null @@ -1,210 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from rally.common import cfg -from rally.common import logging -from rally.common import utils as rutils -from rally.common import validation -from rally import consts -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack import osclients -from rally.plugins.openstack.services.image import image -from rally.task import context - -CONF = cfg.CONF - -LOG = logging.getLogger(__name__) - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="images", platform="openstack", order=410) -class ImageGenerator(context.Context): - """Uploads specified Glance images to every tenant.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "image_url": { - "type": "string", - "description": "Location of the source to create image from." - }, - "disk_format": { - "description": "The format of the disk.", - "enum": ["qcow2", "raw", "vhd", "vmdk", "vdi", "iso", "aki", - "ari", "ami"] - }, - "container_format": { - "description": "Format of the image container.", - "enum": ["aki", "ami", "ari", "bare", "docker", "ova", "ovf"] - }, - "image_name": { - "type": "string", - "description": "The name of image to create. NOTE: it will be " - "ignored in case when `images_per_tenant` is " - "bigger then 1." - }, - "min_ram": { - "description": "Amount of RAM in MB", - "type": "integer", - "minimum": 0 - }, - "min_disk": { - "description": "Amount of disk space in GB", - "type": "integer", - "minimum": 0 - }, - "visibility": { - "description": "Visibility for this image ('shared' and " - "'community' are available only in case of " - "Glance V2).", - "enum": ["public", "private", "shared", "community"] - }, - "images_per_tenant": { - "description": "The number of images to create per one single " - "tenant.", - "type": "integer", - "minimum": 1 - }, - "image_args": { - "description": "This param is deprecated since Rally-0.10.0, " - "specify exact arguments in a root section of " - "context instead.", - "type": "object", - "additionalProperties": True - }, - "image_container": { - "description": "This param is deprecated since Rally-0.10.0, " - "use `container_format` instead.", - "type": "string", - }, - "image_type": { - "description": "This param is deprecated since Rally-0.10.0, " - "use `disk_format` instead.", - "enum": ["qcow2", "raw", "vhd", "vmdk", "vdi", "iso", "aki", - "ari", "ami"], - }, - }, - "oneOf": [{"description": "It is been used since Rally 0.10.0", - "required": ["image_url", "disk_format", - "container_format"]}, - {"description": "One of backward compatible way", - "required": ["image_url", "image_type", - "container_format"]}, - {"description": "One of backward compatible way", - "required": ["image_url", "disk_format", - "image_container"]}, - {"description": "One of backward compatible way", - "required": ["image_url", "image_type", - "image_container"]}], - "additionalProperties": False - } - - DEFAULT_CONFIG = {"images_per_tenant": 1} - - def setup(self): - image_url = self.config.get("image_url") - disk_format = self.config.get("disk_format") - container_format = self.config.get("container_format") - images_per_tenant = self.config.get("images_per_tenant") - visibility = self.config.get("visibility", "private") - min_disk = self.config.get("min_disk", 0) - min_ram = self.config.get("min_ram", 0) - image_args = self.config.get("image_args", {}) - - if "image_type" in self.config: - LOG.warning("The 'image_type' argument is deprecated since " - "Rally 
0.10.0, use disk_format argument instead") - if not disk_format: - disk_format = self.config["image_type"] - - if "image_container" in self.config: - LOG.warning("The 'image_container' argument is deprecated since " - "Rally 0.10.0; use container_format argument instead") - if not container_format: - container_format = self.config["image_container"] - - if image_args: - LOG.warning( - "The 'image_args' argument is deprecated since Rally 0.10.0; " - "specify arguments in a root section of context instead") - - if "is_public" in image_args: - if "visibility" not in self.config: - visibility = ("public" if image_args["is_public"] - else "private") - if "min_ram" in image_args: - if "min_ram" not in self.config: - min_ram = image_args["min_ram"] - - if "min_disk" in image_args: - if "min_disk" not in self.config: - min_disk = image_args["min_disk"] - - # None image_name means that image.Image will generate a random name - image_name = None - if "image_name" in self.config and images_per_tenant == 1: - image_name = self.config["image_name"] - - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - current_images = [] - clients = osclients.Clients( - user["credential"], - api_info=self.context["config"].get("api_versions")) - image_service = image.Image( - clients, name_generator=self.generate_random_name) - - for i in range(images_per_tenant): - image_obj = image_service.create_image( - image_name=image_name, - container_format=container_format, - image_location=image_url, - disk_format=disk_format, - visibility=visibility, - min_disk=min_disk, - min_ram=min_ram) - current_images.append(image_obj.id) - - self.context["tenants"][tenant_id]["images"] = current_images - - def cleanup(self): - if self.context.get("admin", {}): - # NOTE(andreykurilin): Glance does not require the admin for - # listing tenant images, but the admin is required for - # discovering Cinder volumes which might be created for the - # purpose of caching. Removing such volumes are optional step, - # since Cinder should have own mechanism like garbage collector, - # but if we can, let's remove everything and make the cloud as - # close as possible to the original state. - admin = self.context["admin"] - admin_required = None - else: - admin = None - admin_required = False - - if "image_name" in self.config: - matcher = rutils.make_name_matcher(self.config["image_name"]) - else: - matcher = self.__class__ - - resource_manager.cleanup(names=["glance.images", - "cinder.image_volumes_cache"], - admin=admin, - admin_required=admin_required, - users=self.context.get("users", []), - api_versions=self.context["config"].get( - "api_versions"), - superclass=matcher, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/heat/__init__.py b/rally/plugins/openstack/context/heat/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/context/heat/stacks.py b/rally/plugins/openstack/context/heat/stacks.py deleted file mode 100644 index 5875ecb536..0000000000 --- a/rally/plugins/openstack/context/heat/stacks.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import utils as rutils -from rally.common import validation -from rally import consts -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack.scenarios.heat import utils as heat_utils -from rally.task import context - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="stacks", platform="openstack", order=435) -class StackGenerator(context.Context): - """Context class for create temporary stacks with resources. - - Stack generator allows to generate arbitrary number of stacks for - each tenant before test scenarios. In addition, it allows to define - number of resources (namely OS::Heat::RandomString) that will be created - inside each stack. After test execution the stacks will be - automatically removed from heat. - """ - - # The schema of the context configuration format - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - - "properties": { - "stacks_per_tenant": { - "type": "integer", - "minimum": 1 - }, - "resources_per_stack": { - "type": "integer", - "minimum": 1 - } - }, - "additionalProperties": False - } - - DEFAULT_CONFIG = { - "stacks_per_tenant": 2, - "resources_per_stack": 10 - } - - @staticmethod - def _prepare_stack_template(res_num): - template = { - "heat_template_version": "2014-10-16", - "description": "Test template for rally", - "resources": {} - } - rand_string = {"type": "OS::Heat::RandomString"} - for i in range(res_num): - template["resources"]["TestResource%d" % i] = rand_string - return template - - def setup(self): - template = self._prepare_stack_template( - self.config["resources_per_stack"]) - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - heat_scenario = heat_utils.HeatScenario( - {"user": user, "task": self.context["task"], - "owner_id": self.context["owner_id"]}) - self.context["tenants"][tenant_id]["stacks"] = [] - for i in range(self.config["stacks_per_tenant"]): - stack = heat_scenario._create_stack(template) - self.context["tenants"][tenant_id]["stacks"].append(stack.id) - - def cleanup(self): - resource_manager.cleanup(names=["heat.stacks"], - users=self.context.get("users", []), - superclass=heat_utils.HeatScenario, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/keystone/__init__.py b/rally/plugins/openstack/context/keystone/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/context/keystone/roles.py b/rally/plugins/openstack/context/keystone/roles.py deleted file mode 100644 index 9c51670398..0000000000 --- a/rally/plugins/openstack/context/keystone/roles.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import broker -from rally.common import cfg -from rally.common import logging -from rally.common import validation -from rally import consts -from rally import exceptions -from rally.plugins.openstack import osclients -from rally.plugins.openstack.services.identity import identity -from rally.task import context - - -LOG = logging.getLogger(__name__) - -CONF = cfg.CONF - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="roles", platform="openstack", order=330) -class RoleGenerator(context.Context): - """Context class for assigning roles for users.""" - - CONFIG_SCHEMA = { - "type": "array", - "$schema": consts.JSON_SCHEMA, - "items": { - "type": "string", - "description": "The name of role to assign to user" - } - } - - def __init__(self, ctx): - super(RoleGenerator, self).__init__(ctx) - self.credential = self.context["admin"]["credential"] - self.workers = ( - cfg.CONF.openstack.roles_context_resource_management_workers) - - def _get_role_object(self, context_role): - """Check if role exists. - - :param context_role: name of existing role. - """ - keystone = identity.Identity(osclients.Clients(self.credential)) - default_roles = keystone.list_roles() - for def_role in default_roles: - if str(def_role.name) == context_role: - return def_role - else: - raise exceptions.NotFoundException( - "There is no role with name `%s`" % context_role) - - def _get_user_role_ids(self, user_id, project_id): - keystone = identity.Identity(osclients.Clients(self.credential)) - user_roles = keystone.list_roles(user_id=user_id, - project_id=project_id) - return [role.id for role in user_roles] - - def _get_consumer(self, func_name): - def consume(cache, args): - role_id, user_id, project_id = args - if "client" not in cache: - clients = osclients.Clients(self.credential) - cache["client"] = identity.Identity(clients) - getattr(cache["client"], func_name)(role_id=role_id, - user_id=user_id, - project_id=project_id) - return consume - - def setup(self): - """Add all roles to users.""" - threads = self.workers - roles_dict = {} - - def publish(queue): - for context_role in self.config: - role = self._get_role_object(context_role) - roles_dict[role.id] = role.name - LOG.debug("Adding role %(role_name)s having ID %(role_id)s " - "to all users using %(threads)s threads" - % {"role_name": role.name, - "role_id": role.id, - "threads": threads}) - for user in self.context["users"]: - if "roles" not in user: - user["roles"] = self._get_user_role_ids( - user["id"], - user["tenant_id"]) - user["assigned_roles"] = [] - if role.id not in user["roles"]: - args = (role.id, user["id"], user["tenant_id"]) - queue.append(args) - user["assigned_roles"].append(role.id) - - broker.run(publish, self._get_consumer("add_role"), threads) - self.context["roles"] = roles_dict - - def cleanup(self): - """Remove assigned roles from users.""" - threads = self.workers - - def publish(queue): - for role_id in self.context["roles"]: - LOG.debug("Removing assigned role %s from all users" % role_id) - for user in self.context["users"]: - if role_id in 
user["assigned_roles"]: - args = (role_id, user["id"], user["tenant_id"]) - queue.append(args) - - broker.run(publish, self._get_consumer("revoke_role"), threads) diff --git a/rally/plugins/openstack/context/keystone/users.py b/rally/plugins/openstack/context/keystone/users.py deleted file mode 100644 index 073ba19d82..0000000000 --- a/rally/plugins/openstack/context/keystone/users.py +++ /dev/null @@ -1,322 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections -import uuid - -from rally.common import broker -from rally.common import cfg -from rally.common import logging -from rally.common import utils as rutils -from rally.common import validation -from rally import consts -from rally import exceptions -from rally.plugins.openstack import credential -from rally.plugins.openstack import osclients -from rally.plugins.openstack.services.identity import identity -from rally.plugins.openstack.wrappers import network -from rally.task import context - - -LOG = logging.getLogger(__name__) - -CONF = cfg.CONF - -RESOURCE_MANAGEMENT_WORKERS_DESCR = ("The number of concurrent threads to use " - "for serving users context.") -PROJECT_DOMAIN_DESCR = "ID of domain in which projects will be created." -USER_DOMAIN_DESCR = "ID of domain in which users will be created." - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="users", platform="openstack", order=100) -class UserGenerator(context.Context): - """Creates specified amount of keystone users and tenants.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "anyOf": [ - {"description": "Create new temporary users and tenants.", - "properties": { - "tenants": { - "type": "integer", - "minimum": 1, - "description": "The number of tenants to create." - }, - "users_per_tenant": { - "type": "integer", - "minimum": 1, - "description": "The number of users to create per one " - "tenant."}, - "resource_management_workers": { - "type": "integer", - "minimum": 1, - "description": RESOURCE_MANAGEMENT_WORKERS_DESCR}, - "project_domain": { - "type": "string", - "description": PROJECT_DOMAIN_DESCR}, - "user_domain": { - "type": "string", - "description": USER_DOMAIN_DESCR}, - "user_choice_method": { - "$ref": "#/definitions/user_choice_method"}}, - "additionalProperties": False}, - # TODO(andreykurilin): add ability to specify users here. 
- {"description": "Use existing users and tenants.", - "properties": { - "user_choice_method": { - "$ref": "#/definitions/user_choice_method"} - }, - "additionalProperties": False} - ], - "definitions": { - "user_choice_method": { - "enum": ["random", "round_robin"], - "description": "The mode of balancing usage of users between " - "scenario iterations."} - - } - } - - DEFAULT_CONFIG = {"user_choice_method": "random"} - - DEFAULT_FOR_NEW_USERS = { - "tenants": 1, - "users_per_tenant": 1, - "resource_management_workers": - cfg.CONF.openstack.users_context_resource_management_workers, - } - - def __init__(self, context): - super(UserGenerator, self).__init__(context) - - creds = self.env["platforms"]["openstack"] - if creds.get("admin"): - context["admin"] = { - "credential": credential.OpenStackCredential(**creds["admin"])} - - if creds["users"] and not (set(self.config) - {"user_choice_method"}): - self.existing_users = creds["users"] - else: - self.existing_users = [] - self.credential = context["admin"]["credential"] - project_domain = (self.credential["project_domain_name"] or - cfg.CONF.openstack.project_domain) - user_domain = (self.credential["user_domain_name"] or - cfg.CONF.openstack.user_domain) - self.DEFAULT_FOR_NEW_USERS["project_domain"] = project_domain - self.DEFAULT_FOR_NEW_USERS["user_domain"] = user_domain - with self.config.unlocked(): - for key, value in self.DEFAULT_FOR_NEW_USERS.items(): - self.config.setdefault(key, value) - - def _remove_default_security_group(self): - """Delete default security group for tenants.""" - clients = osclients.Clients(self.credential) - - if consts.Service.NEUTRON not in clients.services().values(): - return - - use_sg, msg = network.wrap(clients, self).supports_extension( - "security-group") - if not use_sg: - LOG.debug("Security group context is disabled: %s" % msg) - return - - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - with logging.ExceptionLogger( - LOG, "Unable to delete default security group"): - uclients = osclients.Clients(user["credential"]) - security_groups = uclients.neutron()\ - .list_security_groups(tenant_id=tenant_id) - default = [sg for sg in security_groups["security_groups"] - if sg["name"] == "default"] - if default: - clients.neutron().delete_security_group(default[0]["id"]) - - def _create_tenants(self): - threads = self.config["resource_management_workers"] - - tenants = collections.deque() - - def publish(queue): - for i in range(self.config["tenants"]): - args = (self.config["project_domain"], self.task["uuid"], i) - queue.append(args) - - def consume(cache, args): - domain, task_id, i = args - if "client" not in cache: - clients = osclients.Clients(self.credential) - cache["client"] = identity.Identity( - clients, name_generator=self.generate_random_name) - tenant = cache["client"].create_project(domain_name=domain) - tenant_dict = {"id": tenant.id, "name": tenant.name, "users": []} - tenants.append(tenant_dict) - - # NOTE(msdubov): consume() will fill the tenants list in the closure. - broker.run(publish, consume, threads) - tenants_dict = {} - for t in tenants: - tenants_dict[t["id"]] = t - - return tenants_dict - - def _create_users(self): - # NOTE(msdubov): This should be called after _create_tenants(). 
- threads = self.config["resource_management_workers"] - users_per_tenant = self.config["users_per_tenant"] - default_role = cfg.CONF.openstack.keystone_default_role - - users = collections.deque() - - def publish(queue): - for tenant_id in self.context["tenants"]: - for user_id in range(users_per_tenant): - username = self.generate_random_name() - password = str(uuid.uuid4()) - args = (username, password, self.config["project_domain"], - self.config["user_domain"], tenant_id) - queue.append(args) - - def consume(cache, args): - username, password, project_dom, user_dom, tenant_id = args - if "client" not in cache: - clients = osclients.Clients(self.credential) - cache["client"] = identity.Identity( - clients, name_generator=self.generate_random_name) - client = cache["client"] - user = client.create_user(username, password=password, - project_id=tenant_id, - domain_name=user_dom, - default_role=default_role) - user_credential = credential.OpenStackCredential( - auth_url=self.credential["auth_url"], - username=user.name, - password=password, - tenant_name=self.context["tenants"][tenant_id]["name"], - permission=consts.EndpointPermission.USER, - project_domain_name=project_dom, - user_domain_name=user_dom, - endpoint_type=self.credential["endpoint_type"], - https_insecure=self.credential["https_insecure"], - https_cacert=self.credential["https_cacert"], - region_name=self.credential["region_name"], - profiler_hmac_key=self.credential["profiler_hmac_key"], - profiler_conn_str=self.credential["profiler_conn_str"]) - users.append({"id": user.id, - "credential": user_credential, - "tenant_id": tenant_id}) - - # NOTE(msdubov): consume() will fill the users list in the closure. - broker.run(publish, consume, threads) - return list(users) - - def _get_consumer_for_deletion(self, func_name): - def consume(cache, resource_id): - if "client" not in cache: - clients = osclients.Clients(self.credential) - cache["client"] = identity.Identity(clients) - getattr(cache["client"], func_name)(resource_id) - return consume - - def _delete_tenants(self): - threads = self.config["resource_management_workers"] - - def publish(queue): - for tenant_id in self.context["tenants"]: - queue.append(tenant_id) - - broker.run(publish, self._get_consumer_for_deletion("delete_project"), - threads) - self.context["tenants"] = {} - - def _delete_users(self): - threads = self.config["resource_management_workers"] - - def publish(queue): - for user in self.context["users"]: - queue.append(user["id"]) - - broker.run(publish, self._get_consumer_for_deletion("delete_user"), - threads) - self.context["users"] = [] - - def create_users(self): - """Create tenants and users, using the broker pattern.""" - threads = self.config["resource_management_workers"] - - LOG.debug("Creating %(tenants)d tenants using %(threads)s threads" - % {"tenants": self.config["tenants"], "threads": threads}) - self.context["tenants"] = self._create_tenants() - - if len(self.context["tenants"]) < self.config["tenants"]: - raise exceptions.ContextSetupFailure( - ctx_name=self.get_name(), - msg="Failed to create the requested number of tenants.") - - users_num = self.config["users_per_tenant"] * self.config["tenants"] - LOG.debug("Creating %(users)d users using %(threads)s threads" - % {"users": users_num, "threads": threads}) - self.context["users"] = self._create_users() - for user in self.context["users"]: - self.context["tenants"][user["tenant_id"]]["users"].append(user) - - if len(self.context["users"]) < users_num: - raise 
exceptions.ContextSetupFailure( - ctx_name=self.get_name(), - msg="Failed to create the requested number of users.") - - def use_existing_users(self): - LOG.debug("Using existing users for OpenStack platform.") - for user_credential in self.existing_users: - user_credential = credential.OpenStackCredential(**user_credential) - user_clients = osclients.Clients(user_credential) - user_id = user_clients.keystone.auth_ref.user_id - tenant_id = user_clients.keystone.auth_ref.project_id - - if tenant_id not in self.context["tenants"]: - self.context["tenants"][tenant_id] = { - "id": tenant_id, - "name": user_credential.tenant_name - } - - self.context["users"].append({ - "credential": user_credential, - "id": user_id, - "tenant_id": tenant_id - }) - - def setup(self): - self.context["users"] = [] - self.context["tenants"] = {} - self.context["user_choice_method"] = self.config["user_choice_method"] - - if self.existing_users: - self.use_existing_users() - else: - self.create_users() - - def cleanup(self): - """Delete tenants and users, using the broker pattern.""" - if self.existing_users: - # nothing to do here. - return - else: - self._remove_default_security_group() - self._delete_users() - self._delete_tenants() diff --git a/rally/plugins/openstack/context/magnum/__init__.py b/rally/plugins/openstack/context/magnum/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/context/magnum/ca_certs.py b/rally/plugins/openstack/context/magnum/ca_certs.py deleted file mode 100644 index c0e698b805..0000000000 --- a/rally/plugins/openstack/context/magnum/ca_certs.py +++ /dev/null @@ -1,131 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
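# NOTE(editor): illustrative sketch, not repository code. The users context
# defined above (UserGenerator) accepts either an empty config (existing users)
# or a config like the one below for temporary users; every value shown is an
# arbitrary example, and omitted keys fall back to DEFAULT_FOR_NEW_USERS and
# the default "user_choice_method": "random".
USERS_CONTEXT_EXAMPLE = {
    "users": {
        "tenants": 3,
        "users_per_tenant": 2,
        "resource_management_workers": 4,
        "user_choice_method": "round_robin",  # or "random" (the default)
    }
}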
- -import os - -from cryptography.hazmat import backends -from cryptography.hazmat.primitives.asymmetric import rsa -from cryptography.hazmat.primitives import hashes -from cryptography.hazmat.primitives import serialization -from cryptography import x509 -from cryptography.x509 import oid - -from rally.common import utils as rutils -from rally.common import validation -from rally import consts -from rally.plugins.openstack.scenarios.magnum import utils as magnum_utils -from rally.task import context - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="ca_certs", platform="openstack", order=490) -class CaCertGenerator(context.Context): - """Creates ca certs.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "directory": { - "type": "string", - } - }, - "additionalProperties": False - } - - def _generate_csr_and_key(self): - """Return a dict with a new csr and key.""" - key = rsa.generate_private_key( - public_exponent=65537, - key_size=2048, - backend=backends.default_backend()) - - csr = x509.CertificateSigningRequestBuilder().subject_name(x509.Name([ - x509.NameAttribute(oid.NameOID.COMMON_NAME, u"Magnum User"), - ])).sign(key, hashes.SHA256(), backends.default_backend()) - - result = { - "csr": csr.public_bytes(encoding=serialization.Encoding.PEM), - "key": key.private_bytes( - encoding=serialization.Encoding.PEM, - format=serialization.PrivateFormat.TraditionalOpenSSL, - encryption_algorithm=serialization.NoEncryption()), - } - - return result - - def setup(self): - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - - magnum_scenario = magnum_utils.MagnumScenario({ - "user": user, - "task": self.context["task"], - "config": {"api_versions": self.context["config"].get( - "api_versions", [])} - }) - - # get the cluster and cluster_template - cluster_uuid = str(self.context["tenants"][tenant_id]["cluster"]) - cluster = magnum_scenario._get_cluster(cluster_uuid) - cluster_template = magnum_scenario._get_cluster_template( - cluster.cluster_template_id) - - if not cluster_template.tls_disabled: - tls = self._generate_csr_and_key() - dir = "" - if self.config.get("directory") is not None: - dir = self.config.get("directory") - self.context["ca_certs_directory"] = dir - fname = os.path.join(dir, cluster_uuid + ".key") - with open(fname, "w") as key_file: - key_file.write(tls["key"]) - # get CA certificate for this cluster - ca_cert = magnum_scenario._get_ca_certificate(cluster_uuid) - fname = os.path.join(dir, cluster_uuid + "_ca.crt") - with open(fname, "w") as ca_cert_file: - ca_cert_file.write(ca_cert.pem) - # send csr to Magnum to have it signed - csr_req = {"cluster_uuid": cluster_uuid, - "csr": tls["csr"]} - cert = magnum_scenario._create_ca_certificate(csr_req) - fname = os.path.join(dir, cluster_uuid + ".crt") - with open(fname, "w") as cert_file: - cert_file.write(cert.pem) - - def cleanup(self): - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - - magnum_scenario = magnum_utils.MagnumScenario({ - "user": user, - "task": self.context["task"], - "config": {"api_versions": self.context["config"].get( - "api_versions", [])} - }) - - # get the cluster and cluster_template - cluster_uuid = str(self.context["tenants"][tenant_id]["cluster"]) - cluster = magnum_scenario._get_cluster(cluster_uuid) - cluster_template = magnum_scenario._get_cluster_template( - cluster.cluster_template_id) - - if not cluster_template.tls_disabled: - dir = 
self.context["ca_certs_directory"] - fname = os.path.join(dir, cluster_uuid + ".key") - os.remove(fname) - fname = os.path.join(dir, cluster_uuid + "_ca.crt") - os.remove(fname) - fname = os.path.join(dir, cluster_uuid + ".crt") - os.remove(fname) diff --git a/rally/plugins/openstack/context/magnum/cluster_templates.py b/rally/plugins/openstack/context/magnum/cluster_templates.py deleted file mode 100644 index 589dd78ad6..0000000000 --- a/rally/plugins/openstack/context/magnum/cluster_templates.py +++ /dev/null @@ -1,123 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import utils as rutils -from rally.common import validation -from rally import consts -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack.scenarios.magnum import utils as magnum_utils -from rally.task import context - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="cluster_templates", platform="openstack", order=470) -class ClusterTemplateGenerator(context.Context): - """Creates Magnum cluster template.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "image_id": { - "type": "string" - }, - "flavor_id": { - "type": "string" - }, - "master_flavor_id": { - "type": "string" - }, - "external_network_id": { - "type": "string" - }, - "fixed_network": { - "type": "string" - }, - "fixed_subnet": { - "type": "string" - }, - "dns_nameserver": { - "type": "string" - }, - "docker_volume_size": { - "type": "integer" - }, - "labels": { - "type": "string" - }, - "coe": { - "type": "string" - }, - "http_proxy": { - "type": "string" - }, - "https_proxy": { - "type": "string" - }, - "no_proxy": { - "type": "string" - }, - "network_driver": { - "type": "string" - }, - "tls_disabled": { - "type": "boolean" - }, - "public": { - "type": "boolean" - }, - "registry_enabled": { - "type": "boolean" - }, - "volume_driver": { - "type": "string" - }, - "server_type": { - "type": "string" - }, - "docker_storage_driver": { - "type": "string" - }, - "master_lb_enabled": { - "type": "boolean" - } - }, - "required": ["image_id", "external_network_id", "coe"], - "additionalProperties": False - } - - def setup(self): - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - - magnum_scenario = magnum_utils.MagnumScenario({ - "user": user, - "task": self.context["task"], - "owner_id": self.context["owner_id"], - "config": {"api_versions": self.context["config"].get( - "api_versions", [])} - }) - - cluster_template = magnum_scenario._create_cluster_template( - **self.config) - - ct_uuid = cluster_template.uuid - self.context["tenants"][tenant_id]["cluster_template"] = ct_uuid - - def cleanup(self): - resource_manager.cleanup( - names=["magnum.cluster_templates"], - users=self.context.get("users", []), - superclass=magnum_utils.MagnumScenario, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/magnum/clusters.py 
b/rally/plugins/openstack/context/magnum/clusters.py deleted file mode 100644 index a930f25a67..0000000000 --- a/rally/plugins/openstack/context/magnum/clusters.py +++ /dev/null @@ -1,81 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import utils as rutils -from rally.common import validation -from rally import consts -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack.scenarios.magnum import utils as magnum_utils -from rally.plugins.openstack.scenarios.nova import utils as nova_utils -from rally.task import context - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="clusters", platform="openstack", order=480) -class ClusterGenerator(context.Context): - """Creates specified amount of Magnum clusters.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "cluster_template_uuid": { - "type": "string" - }, - "node_count": { - "type": "integer", - "minimum": 1, - }, - }, - "additionalProperties": False - } - - DEFAULT_CONFIG = {"node_count": 1} - - def setup(self): - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - - nova_scenario = nova_utils.NovaScenario({ - "user": user, - "task": self.context["task"], - "config": {"api_versions": self.context["config"].get( - "api_versions", [])} - }) - keypair = nova_scenario._create_keypair() - - magnum_scenario = magnum_utils.MagnumScenario({ - "user": user, - "task": self.context["task"], - "owner_id": self.context["owner_id"], - "config": {"api_versions": self.context["config"].get( - "api_versions", [])} - }) - - # create a cluster - ct_uuid = self.config.get("cluster_template_uuid", None) - if ct_uuid is None: - ctx = self.context["tenants"][tenant_id] - ct_uuid = ctx.get("cluster_template") - cluster = magnum_scenario._create_cluster( - cluster_template=ct_uuid, - node_count=self.config.get("node_count"), keypair=keypair) - self.context["tenants"][tenant_id]["cluster"] = cluster.uuid - - def cleanup(self): - resource_manager.cleanup( - names=["magnum.clusters", "nova.keypairs"], - users=self.context.get("users", []), - superclass=magnum_utils.MagnumScenario, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/manila/__init__.py b/rally/plugins/openstack/context/manila/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/context/manila/consts.py b/rally/plugins/openstack/context/manila/consts.py deleted file mode 100644 index f38db74bce..0000000000 --- a/rally/plugins/openstack/context/manila/consts.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2015 Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -SHARES_CONTEXT_NAME = "manila_shares" -SHARE_NETWORKS_CONTEXT_NAME = "manila_share_networks" -SECURITY_SERVICES_CONTEXT_NAME = "manila_security_services" diff --git a/rally/plugins/openstack/context/manila/manila_security_services.py b/rally/plugins/openstack/context/manila/manila_security_services.py deleted file mode 100644 index adabcc138a..0000000000 --- a/rally/plugins/openstack/context/manila/manila_security_services.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright 2015 Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import cfg -from rally.common import utils -from rally.common import validation -from rally import consts as rally_consts -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack.context.manila import consts -from rally.plugins.openstack.scenarios.manila import utils as manila_utils -from rally.task import context - -CONF = cfg.CONF -CONTEXT_NAME = consts.SECURITY_SERVICES_CONTEXT_NAME - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name=CONTEXT_NAME, platform="openstack", order=445) -class SecurityServices(context.Context): - """This context creates 'security services' for Manila project.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": rally_consts.JSON_SCHEMA, - "properties": { - "security_services": { - "type": "array", - "description": - "It is expected to be list of dicts with data for creation" - " of security services.", - "items": { - "type": "object", - "properties": {"type": {"enum": ["active_directory", - "kerberos", "ldap"]}}, - "required": ["type"], - "additionalProperties": True, - "description": - "Data for creation of security services. \n " - "Example:\n\n" - " .. 
code-block:: json\n\n" - " {'type': 'LDAP', 'dns_ip': 'foo_ip', \n" - " 'server': 'bar_ip', 'domain': 'quuz_domain',\n" - " 'user': 'ololo', 'password': 'fake_password'}\n" - } - }, - }, - "additionalProperties": False - } - DEFAULT_CONFIG = { - "security_services": [], - } - - def setup(self): - for user, tenant_id in (utils.iterate_per_tenants( - self.context.get("users", []))): - self.context["tenants"][tenant_id][CONTEXT_NAME] = { - "security_services": [], - } - if self.config["security_services"]: - manila_scenario = manila_utils.ManilaScenario({ - "task": self.task, - "owner_id": self.context["owner_id"], - "user": user, - "config": { - "api_versions": self.context["config"].get( - "api_versions", [])} - }) - for ss in self.config["security_services"]: - inst = manila_scenario._create_security_service( - **ss).to_dict() - self.context["tenants"][tenant_id][CONTEXT_NAME][ - "security_services"].append(inst) - - def cleanup(self): - resource_manager.cleanup( - names=["manila.security_services"], - users=self.context.get("users", []), - superclass=manila_utils.ManilaScenario, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/manila/manila_share_networks.py b/rally/plugins/openstack/context/manila/manila_share_networks.py deleted file mode 100644 index ef15d5275a..0000000000 --- a/rally/plugins/openstack/context/manila/manila_share_networks.py +++ /dev/null @@ -1,207 +0,0 @@ -# Copyright 2015 Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import cfg -from rally.common import logging -from rally.common import utils -from rally.common import validation -from rally import consts as rally_consts -from rally import exceptions -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack.context.manila import consts -from rally.plugins.openstack.scenarios.manila import utils as manila_utils -from rally.task import context - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - -CONTEXT_NAME = consts.SHARE_NETWORKS_CONTEXT_NAME - -SHARE_NETWORKS_ARG_DESCR = """ -This context arg will be used only when context arg "use_share_networks" is -set to True. - -If context arg 'share_networks' has values then they will be used else share -networks will be autocreated - one for each tenant network. If networks do not -exist then will be created one share network for each tenant without network -data. - -Expected value is dict of lists where tenant Name or ID is key and list of -share_network Names or IDs is value. Example: - - .. 
code-block:: json - - "context": { - "manila_share_networks": { - "use_share_networks": true, - "share_networks": { - "tenant_1_name_or_id": ["share_network_1_name_or_id", - "share_network_2_name_or_id"], - "tenant_2_name_or_id": ["share_network_3_name_or_id"]} - } - } - -Also, make sure that all 'existing users' in appropriate registered deployment -have share networks if its usage is enabled, else Rally will randomly take -users that does not satisfy criteria. -""" - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name=CONTEXT_NAME, platform="openstack", order=450) -class ShareNetworks(context.Context): - """This context creates share networks for Manila project.""" - CONFIG_SCHEMA = { - "type": "object", - "$schema": rally_consts.JSON_SCHEMA, - "properties": { - "use_share_networks": { - "type": "boolean", - "description": "Specifies whether manila should use share " - "networks for share creation or not."}, - - "share_networks": { - "type": "object", - "description": SHARE_NETWORKS_ARG_DESCR, - "additionalProperties": True - }, - }, - "additionalProperties": False - } - DEFAULT_CONFIG = { - "use_share_networks": False, - "share_networks": {}, - } - - def _setup_for_existing_users(self): - if (self.config["use_share_networks"] and - not self.config["share_networks"]): - msg = ("Usage of share networks was enabled but for deployment " - "with existing users share networks also should be " - "specified via arg 'share_networks'") - raise exceptions.ContextSetupFailure( - ctx_name=self.get_name(), msg=msg) - - for tenant_name_or_id, share_networks in self.config[ - "share_networks"].items(): - # Verify project existence - for tenant in self.context["tenants"].values(): - if tenant_name_or_id in (tenant["id"], tenant["name"]): - tenant_id = tenant["id"] - existing_user = None - for user in self.context["users"]: - if user["tenant_id"] == tenant_id: - existing_user = user - break - break - else: - msg = ("Provided tenant Name or ID '%s' was not found in " - "existing tenants.") % tenant_name_or_id - raise exceptions.ContextSetupFailure( - ctx_name=self.get_name(), msg=msg) - self.context["tenants"][tenant_id][CONTEXT_NAME] = {} - self.context["tenants"][tenant_id][CONTEXT_NAME][ - "share_networks"] = [] - - manila_scenario = manila_utils.ManilaScenario({ - "user": existing_user, - "config": { - "api_versions": self.context["config"].get( - "api_versions", [])} - }) - existing_sns = manila_scenario._list_share_networks( - detailed=False, search_opts={"project_id": tenant_id}) - - for sn_name_or_id in share_networks: - # Verify share network existence - for sn in existing_sns: - if sn_name_or_id in (sn.id, sn.name): - break - else: - msg = ("Specified share network '%(sn)s' does not " - "exist for tenant '%(tenant_id)s'" - % {"sn": sn_name_or_id, "tenant_id": tenant_id}) - raise exceptions.ContextSetupFailure( - ctx_name=self.get_name(), msg=msg) - - # Set share network for project - self.context["tenants"][tenant_id][CONTEXT_NAME][ - "share_networks"].append(sn.to_dict()) - - def _setup_for_autocreated_users(self): - # Create share network for each network of tenant - for user, tenant_id in (utils.iterate_per_tenants( - self.context.get("users", []))): - networks = self.context["tenants"][tenant_id].get("networks") - manila_scenario = manila_utils.ManilaScenario({ - "task": self.task, - "owner_id": self.get_owner_id(), - "user": user, - "config": { - "api_versions": self.context["config"].get( - "api_versions", [])} - }) - 
manila_scenario.RESOURCE_NAME_FORMAT = self.RESOURCE_NAME_FORMAT - self.context["tenants"][tenant_id][CONTEXT_NAME] = { - "share_networks": []} - data = {} - - def _setup_share_network(tenant_id, data): - share_network = manila_scenario._create_share_network( - **data).to_dict() - self.context["tenants"][tenant_id][CONTEXT_NAME][ - "share_networks"].append(share_network) - for ss in self.context["tenants"][tenant_id].get( - consts.SECURITY_SERVICES_CONTEXT_NAME, {}).get( - "security_services", []): - manila_scenario._add_security_service_to_share_network( - share_network["id"], ss["id"]) - - if networks: - for network in networks: - if network.get("cidr"): - data["nova_net_id"] = network["id"] - elif network.get("subnets"): - data["neutron_net_id"] = network["id"] - data["neutron_subnet_id"] = network["subnets"][0] - else: - LOG.warning("Can't determine network service provider." - " Share network will have no data.") - _setup_share_network(tenant_id, data) - else: - _setup_share_network(tenant_id, data) - - def setup(self): - self.context[CONTEXT_NAME] = {} - if not self.config["use_share_networks"]: - pass - elif self.context["config"].get("existing_users"): - self._setup_for_existing_users() - else: - self._setup_for_autocreated_users() - - def cleanup(self): - if (not self.context["config"].get("existing_users") or - self.config["use_share_networks"]): - resource_manager.cleanup( - names=["manila.share_networks"], - users=self.context.get("users", []), - superclass=self.__class__, - api_versions=self.context["config"].get("api_versions"), - task_id=self.get_owner_id()) - else: - # NOTE(vponomaryov): assume that share networks were not created - # by test run. - return diff --git a/rally/plugins/openstack/context/manila/manila_shares.py b/rally/plugins/openstack/context/manila/manila_shares.py deleted file mode 100644 index fe9b59f77a..0000000000 --- a/rally/plugins/openstack/context/manila/manila_shares.py +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright 2016 Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
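# NOTE(editor): illustrative sketch, not repository code. The docstring of the
# manila_share_networks context above already shows the "existing users" form;
# the sketch below shows the autocreated-users form, where enabling
# "use_share_networks" makes the context create one share network per tenant
# network. The tenant and user counts are arbitrary example values.
SHARE_NETWORKS_CONTEXT_EXAMPLE = {
    "users": {"tenants": 2, "users_per_tenant": 1},
    "manila_share_networks": {"use_share_networks": True},
}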
- -from rally.common import cfg -from rally.common import utils -from rally.common import validation -from rally import consts as rally_consts -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack.context.manila import consts -from rally.plugins.openstack.scenarios.manila import utils as manila_utils -from rally.task import context - -CONF = cfg.CONF -CONTEXT_NAME = consts.SHARES_CONTEXT_NAME - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name=CONTEXT_NAME, platform="openstack", order=455) -class Shares(context.Context): - """This context creates shares for Manila project.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": rally_consts.JSON_SCHEMA, - "properties": { - "shares_per_tenant": { - "type": "integer", - "minimum": 1, - }, - "size": { - "type": "integer", - "minimum": 1 - }, - "share_proto": { - "type": "string", - }, - "share_type": { - "type": "string", - }, - }, - "additionalProperties": False - } - - DEFAULT_CONFIG = { - "shares_per_tenant": 1, - "size": 1, - "share_proto": "NFS", - "share_type": None, - } - - def _create_shares(self, manila_scenario, tenant_id, share_proto, size=1, - share_type=None): - tenant_ctxt = self.context["tenants"][tenant_id] - tenant_ctxt.setdefault("shares", []) - for i in range(self.config["shares_per_tenant"]): - kwargs = {"share_proto": share_proto, "size": size} - if share_type: - kwargs["share_type"] = share_type - share_networks = tenant_ctxt.get("manila_share_networks", {}).get( - "share_networks", []) - if share_networks: - kwargs["share_network"] = share_networks[ - i % len(share_networks)]["id"] - share = manila_scenario._create_share(**kwargs) - tenant_ctxt["shares"].append(share.to_dict()) - - def setup(self): - for user, tenant_id in ( - utils.iterate_per_tenants(self.context.get("users", []))): - manila_scenario = manila_utils.ManilaScenario({ - "task": self.task, - "owner_id": self.context["owner_id"], - "user": user, - "config": { - "api_versions": self.context["config"].get( - "api_versions", [])} - }) - self._create_shares( - manila_scenario, - tenant_id, - self.config["share_proto"], - self.config["size"], - self.config["share_type"], - ) - - def cleanup(self): - resource_manager.cleanup( - names=["manila.shares"], - users=self.context.get("users", []), - superclass=manila_utils.ManilaScenario, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/monasca/__init__.py b/rally/plugins/openstack/context/monasca/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/context/monasca/metrics.py b/rally/plugins/openstack/context/monasca/metrics.py deleted file mode 100644 index ec706124c7..0000000000 --- a/rally/plugins/openstack/context/monasca/metrics.py +++ /dev/null @@ -1,103 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
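# NOTE(editor): illustrative sketch, not repository code. The manila_shares
# context above distributes a tenant's shares across its share networks with a
# round-robin index (share_networks[i % len(share_networks)]); with three
# networks and five shares the assignment looks like this. Network ids are
# hypothetical.
share_networks = [{"id": "net-a"}, {"id": "net-b"}, {"id": "net-c"}]
assignment = [share_networks[i % len(share_networks)]["id"] for i in range(5)]
# -> ["net-a", "net-b", "net-c", "net-a", "net-b"]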
- -from six import moves - -from rally.common import utils as rutils -from rally.common import validation -from rally import consts -from rally.plugins.openstack.scenarios.monasca import utils as monasca_utils -from rally.task import context - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="monasca_metrics", platform="openstack", order=510) -class MonascaMetricGenerator(context.Context): - """Creates Monasca Metrics.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "name": { - "type": "string" - }, - "dimensions": { - "type": "object", - "properties": { - "region": { - "type": "string" - }, - "service": { - "type": "string" - }, - "hostname": { - "type": "string" - }, - "url": { - "type": "string" - } - }, - "additionalProperties": False - }, - "metrics_per_tenant": { - "type": "integer", - "minimum": 1 - }, - "value_meta": { - "type": "array", - "items": { - "type": "object", - "properties": { - "value_meta_key": { - "type": "string" - }, - "value_meta_value": { - "type": "string" - } - }, - "additionalProperties": False - } - } - }, - "additionalProperties": False - } - - DEFAULT_CONFIG = { - "metrics_per_tenant": 2 - } - - def setup(self): - new_metric = {} - - if "dimensions" in self.config: - new_metric = { - "dimensions": self.config["dimensions"] - } - - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - scenario = monasca_utils.MonascaScenario( - context={"user": user, "task": self.context["task"]} - ) - for i in moves.xrange(self.config["metrics_per_tenant"]): - scenario._create_metrics(**new_metric) - rutils.interruptable_sleep(0.001) - rutils.interruptable_sleep( - monasca_utils.CONF.openstack.monasca_metric_create_prepoll_delay, - atomic_delay=1) - - def cleanup(self): - # We don't have API for removal of metrics - pass diff --git a/rally/plugins/openstack/context/murano/__init__.py b/rally/plugins/openstack/context/murano/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/context/murano/murano_environments.py b/rally/plugins/openstack/context/murano/murano_environments.py deleted file mode 100644 index 5592852f50..0000000000 --- a/rally/plugins/openstack/context/murano/murano_environments.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright 2016: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
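# NOTE(editor): illustrative sketch, not repository code. A config accepted by
# the monasca_metrics context above; all dimension values and the value_meta
# pair are hypothetical examples. "metrics_per_tenant" defaults to 2 when
# omitted (see DEFAULT_CONFIG above).
MONASCA_METRICS_CONTEXT_EXAMPLE = {
    "monasca_metrics": {
        "dimensions": {
            "region": "RegionOne",           # hypothetical
            "service": "monitoring",         # hypothetical
            "hostname": "compute-01",        # hypothetical
            "url": "http://127.0.0.1:8070",  # hypothetical
        },
        "metrics_per_tenant": 10,
        "value_meta": [
            {"value_meta_key": "origin", "value_meta_value": "rally"},  # hypothetical
        ],
    }
}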
- -from rally.common import utils -from rally.common import validation -from rally import consts -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack.scenarios.murano import utils as murano_utils -from rally.task import context - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="murano_environments", platform="openstack", order=402) -class EnvironmentGenerator(context.Context): - """Context class for creating murano environments.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "environments_per_tenant": { - "type": "integer", - "minimum": 1 - }, - }, - "required": ["environments_per_tenant"], - "additionalProperties": False - } - - def setup(self): - for user, tenant_id in utils.iterate_per_tenants( - self.context["users"]): - self.context["tenants"][tenant_id]["environments"] = [] - for i in range(self.config["environments_per_tenant"]): - murano_util = murano_utils.MuranoScenario( - {"user": user, - "task": self.context["task"], - "owner_id": self.context["owner_id"], - "config": self.context["config"]}) - env = murano_util._create_environment() - self.context["tenants"][tenant_id]["environments"].append(env) - - def cleanup(self): - resource_manager.cleanup(names=["murano.environments"], - users=self.context.get("users", []), - superclass=murano_utils.MuranoScenario, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/murano/murano_packages.py b/rally/plugins/openstack/context/murano/murano_packages.py deleted file mode 100644 index 38f428e95a..0000000000 --- a/rally/plugins/openstack/context/murano/murano_packages.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
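# NOTE(editor): illustrative sketch, not repository code. CONFIG_SCHEMA blocks
# such as the one in the murano_environments context above are plain JSON
# Schema documents, so a candidate config can be checked directly with the
# jsonschema library. The schema below mirrors the shape of that CONFIG_SCHEMA
# (minus the "$schema" key); the example value is arbitrary.
import jsonschema

MURANO_ENVIRONMENTS_SCHEMA = {
    "type": "object",
    "properties": {
        "environments_per_tenant": {"type": "integer", "minimum": 1},
    },
    "required": ["environments_per_tenant"],
    "additionalProperties": False,
}

jsonschema.validate({"environments_per_tenant": 2}, MURANO_ENVIRONMENTS_SCHEMA)
# A jsonschema.exceptions.ValidationError would be raised for an invalid
# config, e.g. {"environments_per_tenant": 0}.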
- -import os -import zipfile - -from rally.common import fileutils -from rally.common import utils -from rally.common import validation -from rally import consts -from rally import exceptions -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack import osclients -from rally.task import context - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="murano_packages", platform="openstack", order=401) -class PackageGenerator(context.Context): - """Context class for uploading applications for murano.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "app_package": { - "type": "string", - } - }, - "required": ["app_package"], - "additionalProperties": False - } - - def setup(self): - is_config_app_dir = False - pckg_path = os.path.expanduser(self.config["app_package"]) - if zipfile.is_zipfile(pckg_path): - zip_name = pckg_path - elif os.path.isdir(pckg_path): - is_config_app_dir = True - zip_name = fileutils.pack_dir(pckg_path) - else: - msg = "There is no zip archive or directory by this path: %s" - raise exceptions.ContextSetupFailure(msg=msg % pckg_path, - ctx_name=self.get_name()) - - for user, tenant_id in utils.iterate_per_tenants( - self.context["users"]): - clients = osclients.Clients(user["credential"]) - self.context["tenants"][tenant_id]["packages"] = [] - if is_config_app_dir: - self.context["tenants"][tenant_id]["murano_ctx"] = zip_name - # TODO(astudenov): use self.generate_random_name() - package = clients.murano().packages.create( - {"categories": ["Web"], "tags": ["tag"]}, - {"file": open(zip_name)}) - - self.context["tenants"][tenant_id]["packages"].append(package) - - def cleanup(self): - resource_manager.cleanup(names=["murano.packages"], - users=self.context.get("users", []), - superclass=self.__class__, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/network/__init__.py b/rally/plugins/openstack/context/network/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/context/network/allow_ssh.py b/rally/plugins/openstack/context/network/allow_ssh.py deleted file mode 100644 index 0a9c55d62e..0000000000 --- a/rally/plugins/openstack/context/network/allow_ssh.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import logging -from rally.common import utils -from rally.common import validation -from rally.plugins.openstack import osclients -from rally.plugins.openstack.wrappers import network -from rally.task import context - - -LOG = logging.getLogger(__name__) - - -def _prepare_open_secgroup(credential, secgroup_name): - """Generate secgroup allowing all tcp/udp/icmp access. - - In order to run tests on instances it is necessary to have SSH access. - This function generates a secgroup which allows all tcp/udp/icmp access. 
- - :param credential: clients credential - :param secgroup_name: security group name - - :returns: dict with security group details - """ - neutron = osclients.Clients(credential).neutron() - security_groups = neutron.list_security_groups()["security_groups"] - rally_open = [sg for sg in security_groups if sg["name"] == secgroup_name] - if not rally_open: - descr = "Allow ssh access to VMs created by Rally" - rally_open = neutron.create_security_group( - {"security_group": {"name": secgroup_name, - "description": descr}})["security_group"] - else: - rally_open = rally_open[0] - - rules_to_add = [ - { - "protocol": "tcp", - "port_range_max": 65535, - "port_range_min": 1, - "remote_ip_prefix": "0.0.0.0/0", - "direction": "ingress" - }, - { - "protocol": "udp", - "port_range_max": 65535, - "port_range_min": 1, - "remote_ip_prefix": "0.0.0.0/0", - "direction": "ingress" - }, - { - "protocol": "icmp", - "remote_ip_prefix": "0.0.0.0/0", - "direction": "ingress" - } - ] - - def rule_match(criteria, existing_rule): - return all(existing_rule[key] == value - for key, value in criteria.items()) - - for new_rule in rules_to_add: - if not any(rule_match(new_rule, existing_rule) for existing_rule - in rally_open.get("security_group_rules", [])): - new_rule["security_group_id"] = rally_open["id"] - neutron.create_security_group_rule( - {"security_group_rule": new_rule}) - - return rally_open - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="allow_ssh", platform="openstack", order=320) -class AllowSSH(context.Context): - """Sets up security groups for all users to access VM via SSH.""" - - def setup(self): - admin_or_user = (self.context.get("admin") or - self.context.get("users")[0]) - - net_wrapper = network.wrap( - osclients.Clients(admin_or_user["credential"]), - self, config=self.config) - use_sg, msg = net_wrapper.supports_extension("security-group") - if not use_sg: - LOG.info("Security group context is disabled: %s" % msg) - return - - secgroup_name = self.generate_random_name() - for user in self.context["users"]: - user["secgroup"] = _prepare_open_secgroup(user["credential"], - secgroup_name) - - def cleanup(self): - for user, tenant_id in utils.iterate_per_tenants( - self.context["users"]): - with logging.ExceptionLogger( - LOG, - "Unable to delete security group: %s." - % user["secgroup"]["name"]): - clients = osclients.Clients(user["credential"]) - clients.neutron().delete_security_group(user["secgroup"]["id"]) diff --git a/rally/plugins/openstack/context/network/existing_network.py b/rally/plugins/openstack/context/network/existing_network.py deleted file mode 100644 index 4c6fdbde2f..0000000000 --- a/rally/plugins/openstack/context/network/existing_network.py +++ /dev/null @@ -1,47 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
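The duplicate-rule check in the removed allow_ssh context is self-contained enough to read in isolation. A minimal, client-free sketch of the same logic (the existing rule below is a hypothetical sample):

# Ingress rules the context wants to exist, as in allow_ssh above.
RULES_TO_ADD = [
    {"protocol": "tcp", "port_range_min": 1, "port_range_max": 65535,
     "remote_ip_prefix": "0.0.0.0/0", "direction": "ingress"},
    {"protocol": "udp", "port_range_min": 1, "port_range_max": 65535,
     "remote_ip_prefix": "0.0.0.0/0", "direction": "ingress"},
    {"protocol": "icmp", "remote_ip_prefix": "0.0.0.0/0",
     "direction": "ingress"},
]

def rule_match(criteria, existing_rule):
    # A desired rule is already covered when every key/value pair it
    # specifies is present in the existing rule (extra keys are ignored).
    return all(existing_rule.get(key) == value
               for key, value in criteria.items())

def missing_rules(existing_rules):
    """Return only the desired rules that still have to be created."""
    return [rule for rule in RULES_TO_ADD
            if not any(rule_match(rule, er) for er in existing_rules)]

existing = [{"protocol": "icmp", "remote_ip_prefix": "0.0.0.0/0",
             "direction": "ingress", "id": "rule-1"}]
print(len(missing_rules(existing)))  # -> 2 (tcp and udp are still missing)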
- -from rally.common import utils -from rally.common import validation -from rally import consts -from rally.plugins.openstack import osclients -from rally.plugins.openstack.wrappers import network as network_wrapper -from rally.task import context - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="existing_network", platform="openstack", order=349) -class ExistingNetwork(context.Context): - """This context supports using existing networks in Rally. - - This context should be used on a deployment with existing users. - """ - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "additionalProperties": False - } - - def setup(self): - for user, tenant_id in utils.iterate_per_tenants( - self.context.get("users", [])): - net_wrapper = network_wrapper.wrap( - osclients.Clients(user["credential"]), self, - config=self.config) - self.context["tenants"][tenant_id]["networks"] = ( - net_wrapper.list_networks()) - - def cleanup(self): - """Networks were not created by Rally, so nothing to do.""" diff --git a/rally/plugins/openstack/context/network/networks.py b/rally/plugins/openstack/context/network/networks.py deleted file mode 100644 index 71714d60fe..0000000000 --- a/rally/plugins/openstack/context/network/networks.py +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import logging -from rally.common import utils -from rally.common import validation -from rally import consts -from rally.plugins.openstack import osclients -from rally.plugins.openstack.wrappers import network as network_wrapper -from rally.task import context - - -LOG = logging.getLogger(__name__) - - -# NOTE(andreykurilin): admin is used only by cleanup -@validation.add("required_platform", platform="openstack", admin=True, - users=True) -@context.configure(name="network", platform="openstack", order=350) -class Network(context.Context): - """Create networking resources. - - This creates networks for all tenants, and optionally creates - another resources like subnets and routers. 
- """ - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "start_cidr": { - "type": "string" - }, - "networks_per_tenant": { - "type": "integer", - "minimum": 1 - }, - "subnets_per_network": { - "type": "integer", - "minimum": 1 - }, - "network_create_args": { - "type": "object", - "additionalProperties": True - }, - "dns_nameservers": { - "type": "array", - "items": {"type": "string"}, - "uniqueItems": True - }, - "dualstack": { - "type": "boolean", - }, - "router": { - "type": "object", - "properties": { - "external": { - "type": "boolean" - }, - "external_gateway_info": { - "description": "The external gateway information .", - "type": "object", - "properties": { - "network_id": {"type": "string"}, - "enable_snat": {"type": "boolean"} - }, - "additionalProperties": False - } - }, - "additionalProperties": False - } - }, - "additionalProperties": False - } - - DEFAULT_CONFIG = { - "start_cidr": "10.2.0.0/24", - "networks_per_tenant": 1, - "subnets_per_network": 1, - "network_create_args": {}, - "dns_nameservers": None, - "router": {"external": True}, - "dualstack": False - } - - def setup(self): - # NOTE(rkiran): Some clients are not thread-safe. Thus during - # multithreading/multiprocessing, it is likely the - # sockets are left open. This problem is eliminated by - # creating a connection in setup and cleanup separately. - net_wrapper = network_wrapper.wrap( - osclients.Clients(self.context["admin"]["credential"]), - self, config=self.config) - kwargs = {} - if self.config["dns_nameservers"] is not None: - kwargs["dns_nameservers"] = self.config["dns_nameservers"] - for user, tenant_id in (utils.iterate_per_tenants( - self.context.get("users", []))): - self.context["tenants"][tenant_id]["networks"] = [] - for i in range(self.config["networks_per_tenant"]): - # NOTE(amaretskiy): router_create_args and subnets_num take - # effect for Neutron only. - network_create_args = self.config["network_create_args"].copy() - network = net_wrapper.create_network( - tenant_id, - dualstack=self.config["dualstack"], - subnets_num=self.config["subnets_per_network"], - network_create_args=network_create_args, - router_create_args=self.config["router"], - **kwargs) - self.context["tenants"][tenant_id]["networks"].append(network) - - def cleanup(self): - net_wrapper = network_wrapper.wrap( - osclients.Clients(self.context["admin"]["credential"]), - self, config=self.config) - for tenant_id, tenant_ctx in self.context["tenants"].items(): - for network in tenant_ctx.get("networks", []): - with logging.ExceptionLogger( - LOG, - "Failed to delete network for tenant %s" % tenant_id): - net_wrapper.delete_network(network) diff --git a/rally/plugins/openstack/context/network/routers.py b/rally/plugins/openstack/context/network/routers.py deleted file mode 100644 index d5ff6d70b1..0000000000 --- a/rally/plugins/openstack/context/network/routers.py +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright 2017: Orange -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from rally.common import utils -from rally.common import validation -from rally import consts -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack.scenarios.neutron import utils as neutron_utils -from rally.task import context - - -@validation.add("required_platform", platform="openstack", admin=True, - users=True) -@context.configure(name="router", platform="openstack", order=351) -class Router(context.Context): - """Create networking resources. - - This creates router for all tenants. - """ - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "routers_per_tenant": { - "type": "integer", - "minimum": 1 - }, - "admin_state_up ": { - "description": "A human-readable description for the resource", - "type": "boolean", - }, - "external_gateway_info": { - "description": "The external gateway information .", - "type": "object", - "properties": { - "network_id": {"type": "string"}, - "enable_snat": {"type": "boolean"} - }, - "additionalProperties": False - }, - "network_id": { - "description": "Network ID", - "type": "string" - }, - "external_fixed_ips": { - "description": "Ip(s) of the external gateway interface.", - "type": "array", - "items": { - "type": "object", - "properties": { - "ip_address": {"type": "string"}, - "subnet_id": {"type": "string"} - }, - "additionalProperties": False, - } - }, - "distributed": { - "description": "Distributed router. Require dvr extension.", - "type": "boolean" - }, - "ha": { - "description": "Highly-available router. Require l3-ha.", - "type": "boolean" - }, - "availability_zone_hints": { - "description": "Require router_availability_zone extension.", - "type": "boolean" - } - }, - "additionalProperties": False - } - - DEFAULT_CONFIG = { - "routers_per_tenant": 1, - } - - def setup(self): - kwargs = {} - parameters = ("admin_state_up", "external_gateway_info", "network_id", - "external_fixed_ips", "distributed", "ha", - "availability_zone_hints") - for parameter in parameters: - if parameter in self.config: - kwargs[parameter] = self.config[parameter] - for user, tenant_id in (utils.iterate_per_tenants( - self.context.get("users", []))): - self.context["tenants"][tenant_id]["routers"] = [] - scenario = neutron_utils.NeutronScenario( - context={"user": user, "task": self.context["task"], - "owner_id": self.context["owner_id"]} - ) - for i in range(self.config["routers_per_tenant"]): - router = scenario._create_router(kwargs) - self.context["tenants"][tenant_id]["routers"].append(router) - - def cleanup(self): - resource_manager.cleanup( - names=["neutron.router"], - users=self.context.get("users", []), - superclass=neutron_utils.NeutronScenario, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/neutron/__init__.py b/rally/plugins/openstack/context/neutron/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/context/neutron/lbaas.py b/rally/plugins/openstack/context/neutron/lbaas.py deleted file mode 100644 index 8ba5efc143..0000000000 --- a/rally/plugins/openstack/context/neutron/lbaas.py +++ /dev/null @@ -1,93 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import logging -from rally.common import utils -from rally.common import validation -from rally import consts -from rally.plugins.openstack import osclients -from rally.plugins.openstack.wrappers import network as network_wrapper -from rally.task import context - - -LOG = logging.getLogger(__name__) - - -@validation.add("required_platform", platform="openstack", admin=True, - users=True) -@context.configure(name="lbaas", platform="openstack", order=360) -class Lbaas(context.Context): - """Creates a lb-pool for every subnet created in network context.""" - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "pool": { - "type": "object", - "additionalProperties": True - }, - "lbaas_version": { - "type": "integer", - "minimum": 1 - } - }, - "additionalProperties": False - } - - DEFAULT_CONFIG = { - "pool": { - "lb_method": "ROUND_ROBIN", - "protocol": "HTTP" - }, - "lbaas_version": 1 - } - - def setup(self): - net_wrapper = network_wrapper.wrap( - osclients.Clients(self.context["admin"]["credential"]), - self, config=self.config) - - use_lb, msg = net_wrapper.supports_extension("lbaas") - if not use_lb: - LOG.info(msg) - return - - # Creates a lb-pool for every subnet created in network context. - for user, tenant_id in (utils.iterate_per_tenants( - self.context.get("users", []))): - for network in self.context["tenants"][tenant_id]["networks"]: - for subnet in network.get("subnets", []): - if self.config["lbaas_version"] == 1: - network.setdefault("lb_pools", []).append( - net_wrapper.create_v1_pool( - tenant_id, - subnet, - **self.config["pool"])) - else: - raise NotImplementedError( - "Context for LBaaS version %s not implemented." - % self.config["lbaas_version"]) - - def cleanup(self): - net_wrapper = network_wrapper.wrap( - osclients.Clients(self.context["admin"]["credential"]), - self, config=self.config) - for tenant_id, tenant_ctx in self.context["tenants"].items(): - for network in tenant_ctx.get("networks", []): - for pool in network.get("lb_pools", []): - with logging.ExceptionLogger( - LOG, - "Failed to delete pool %(pool)s for tenant " - "%(tenant)s" % {"pool": pool["pool"]["id"], - "tenant": tenant_id}): - if self.config["lbaas_version"] == 1: - net_wrapper.delete_v1_pool(pool["pool"]["id"]) diff --git a/rally/plugins/openstack/context/nova/__init__.py b/rally/plugins/openstack/context/nova/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/context/nova/flavors.py b/rally/plugins/openstack/context/nova/flavors.py deleted file mode 100644 index 5b646a24ec..0000000000 --- a/rally/plugins/openstack/context/nova/flavors.py +++ /dev/null @@ -1,133 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import logging -from rally.common import utils as rutils -from rally.common import validation -from rally import consts -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack import osclients -from rally.task import context - -LOG = logging.getLogger(__name__) - - -@validation.add("required_platform", platform="openstack", admin=True) -@context.configure(name="flavors", platform="openstack", order=340) -class FlavorsGenerator(context.Context): - """Context creates a list of flavors.""" - - CONFIG_SCHEMA = { - "type": "array", - "$schema": consts.JSON_SCHEMA, - "items": { - "type": "object", - "properties": { - "name": { - "type": "string", - }, - "ram": { - "type": "integer", - "minimum": 1 - }, - "vcpus": { - "type": "integer", - "minimum": 1 - }, - "disk": { - "type": "integer", - "minimum": 0 - }, - "swap": { - "type": "integer", - "minimum": 0 - }, - "ephemeral": { - "type": "integer", - "minimum": 0 - }, - "extra_specs": { - "type": "object", - "additionalProperties": { - "type": "string" - } - } - }, - "additionalProperties": False, - "required": ["name", "ram"] - } - } - - def setup(self): - """Create list of flavors.""" - from novaclient import exceptions as nova_exceptions - - self.context["flavors"] = {} - - clients = osclients.Clients(self.context["admin"]["credential"]) - for flavor_config in self.config: - - extra_specs = flavor_config.get("extra_specs") - - flavor_config = FlavorConfig(**flavor_config) - try: - flavor = clients.nova().flavors.create(**flavor_config) - except nova_exceptions.Conflict: - msg = "Using existing flavor %s" % flavor_config["name"] - if logging.is_debug(): - LOG.exception(msg) - else: - LOG.warning(msg) - continue - - if extra_specs: - flavor.set_keys(extra_specs) - - self.context["flavors"][flavor_config["name"]] = flavor.to_dict() - LOG.debug("Created flavor with id '%s'" % flavor.id) - - def cleanup(self): - """Delete created flavors.""" - mather = rutils.make_name_matcher(*[f["name"] for f in self.config]) - resource_manager.cleanup( - names=["nova.flavors"], - admin=self.context["admin"], - api_versions=self.context["config"].get("api_versions"), - superclass=mather, - task_id=self.get_owner_id()) - - -class FlavorConfig(dict): - def __init__(self, name, ram, vcpus=1, disk=0, swap=0, ephemeral=0, - extra_specs=None): - """Flavor configuration for context and flavor & image validation code. - - Context code uses this code to provide default values for flavor - creation. Validation code uses this class as a Flavor instance to - check image validity against a flavor that is to be created by - the context. 
- - :param name: name of the newly created flavor - :param ram: RAM amount for the flavor (MBs) - :param vcpus: VCPUs amount for the flavor - :param disk: disk amount for the flavor (GBs) - :param swap: swap amount for the flavor (MBs) - :param ephemeral: ephemeral disk amount for the flavor (GBs) - :param extra_specs: is ignored - """ - super(FlavorConfig, self).__init__( - name=name, ram=ram, vcpus=vcpus, disk=disk, - swap=swap, ephemeral=ephemeral) - self.__dict__.update(self) diff --git a/rally/plugins/openstack/context/nova/keypairs.py b/rally/plugins/openstack/context/nova/keypairs.py deleted file mode 100644 index 7ad344d74e..0000000000 --- a/rally/plugins/openstack/context/nova/keypairs.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright 2014: Rackspace UK -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import validation -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack import osclients -from rally.task import context - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="keypair", platform="openstack", order=310) -class Keypair(context.Context): - """Create Nova KeyPair for each user.""" - - # NOTE(andreykurilin): "type" != "null", since we need to support backward - # compatibility(previously empty dict was valid) and I hope in near - # future, we will extend this context to accept keys. - CONFIG_SCHEMA = {"type": "object", - "additionalProperties": False} - - def _generate_keypair(self, credential): - nova_client = osclients.Clients(credential).nova() - # NOTE(hughsaunders): If keypair exists, it should re-generate name. - - keypairs = nova_client.keypairs.list() - keypair_names = [keypair.name for keypair in keypairs] - while True: - keypair_name = self.generate_random_name() - if keypair_name not in keypair_names: - break - - keypair = nova_client.keypairs.create(keypair_name) - return {"private": keypair.private_key, - "public": keypair.public_key, - "name": keypair_name, - "id": keypair.id} - - def setup(self): - for user in self.context["users"]: - user["keypair"] = self._generate_keypair(user["credential"]) - - def cleanup(self): - resource_manager.cleanup(names=["nova.keypairs"], - users=self.context.get("users", []), - superclass=self.__class__, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/nova/servers.py b/rally/plugins/openstack/context/nova/servers.py deleted file mode 100755 index 076841cf75..0000000000 --- a/rally/plugins/openstack/context/nova/servers.py +++ /dev/null @@ -1,140 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import logging -from rally.common import utils as rutils -from rally.common import validation -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack.scenarios.nova import utils as nova_utils -from rally.plugins.openstack import types -from rally.task import context - - -LOG = logging.getLogger(__name__) - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="servers", platform="openstack", order=430) -class ServerGenerator(context.Context): - """Creates specified amount of Nova Servers per each tenant.""" - - CONFIG_SCHEMA = { - "type": "object", - "properties": { - "image": { - "description": "Name of image to boot server(s) from.", - "type": "object", - "properties": { - "name": {"type": "string"} - }, - "additionalProperties": False - }, - "flavor": { - "description": "Name of flavor to boot server(s) with.", - "type": "object", - "properties": { - "name": {"type": "string"} - }, - "additionalProperties": False - }, - "servers_per_tenant": { - "description": "Number of servers to boot in each Tenant.", - "type": "integer", - "minimum": 1 - }, - "auto_assign_nic": { - "description": "True if NICs should be assigned.", - "type": "boolean", - }, - "nics": { - "type": "array", - "description": "List of networks to attach to server.", - "items": {"oneOf": [ - { - "type": "object", - "properties": {"net-id": {"type": "string"}}, - "description": "Network ID in a format like OpenStack " - "API expects to see.", - "additionalProperties": False - }, - { - "type": "string", - "description": "Network ID." 
- } - ]}, - "minItems": 1 - } - }, - "required": ["image", "flavor"], - "additionalProperties": False - } - - DEFAULT_CONFIG = { - "servers_per_tenant": 5, - "auto_assign_nic": False - } - - def setup(self): - image = self.config["image"] - flavor = self.config["flavor"] - auto_nic = self.config["auto_assign_nic"] - servers_per_tenant = self.config["servers_per_tenant"] - kwargs = {} - if self.config.get("nics"): - if isinstance(self.config["nics"][0], dict): - # it is a format that Nova API expects - kwargs["nics"] = list(self.config["nics"]) - else: - kwargs["nics"] = [{"net-id": nic} - for nic in self.config["nics"]] - - image_id = types.GlanceImage(self.context).pre_process( - resource_spec=image, config={}) - flavor_id = types.Flavor(self.context).pre_process( - resource_spec=flavor, config={}) - - for iter_, (user, tenant_id) in enumerate(rutils.iterate_per_tenants( - self.context["users"])): - LOG.debug("Booting servers for user tenant %s" % user["tenant_id"]) - tmp_context = {"user": user, - "tenant": self.context["tenants"][tenant_id], - "task": self.context["task"], - "owner_id": self.context["owner_id"], - "iteration": iter_} - nova_scenario = nova_utils.NovaScenario(tmp_context) - - LOG.debug("Calling _boot_servers with image_id=%(image_id)s " - "flavor_id=%(flavor_id)s " - "servers_per_tenant=%(servers_per_tenant)s" - % {"image_id": image_id, - "flavor_id": flavor_id, - "servers_per_tenant": servers_per_tenant}) - - servers = nova_scenario._boot_servers(image_id, flavor_id, - requests=servers_per_tenant, - auto_assign_nic=auto_nic, - **kwargs) - - current_servers = [server.id for server in servers] - - LOG.debug("Adding booted servers %s to context" % current_servers) - - self.context["tenants"][tenant_id][ - "servers"] = current_servers - - def cleanup(self): - resource_manager.cleanup(names=["nova.servers"], - users=self.context.get("users", []), - superclass=nova_utils.NovaScenario, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/quotas/__init__.py b/rally/plugins/openstack/context/quotas/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/context/quotas/cinder_quotas.py b/rally/plugins/openstack/context/quotas/cinder_quotas.py deleted file mode 100644 index 5015683358..0000000000 --- a/rally/plugins/openstack/context/quotas/cinder_quotas.py +++ /dev/null @@ -1,58 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
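The removed servers context accepts the "nics" option either as bare network IDs or as dicts already in the {"net-id": ...} form Nova expects, and normalizes the former into the latter. A standalone sketch of that normalization (the sample IDs are hypothetical):

def normalize_nics(nics):
    """Return NICs in the {"net-id": ...} dict form, as the servers
    context above does before booting instances."""
    if not nics:
        return []
    if isinstance(nics[0], dict):
        return list(nics)
    return [{"net-id": nic} for nic in nics]

print(normalize_nics(["net-1", "net-2"]))
# [{'net-id': 'net-1'}, {'net-id': 'net-2'}]
print(normalize_nics([{"net-id": "net-3"}]))
# [{'net-id': 'net-3'}]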
- - -class CinderQuotas(object): - """Management of Cinder quotas.""" - - QUOTAS_SCHEMA = { - "type": "object", - "additionalProperties": False, - "properties": { - "gigabytes": { - "type": "integer", - "minimum": -1 - }, - "snapshots": { - "type": "integer", - "minimum": -1 - }, - "volumes": { - "type": "integer", - "minimum": -1 - }, - "backups": { - "type": "integer", - "minimum": -1 - }, - "backup_gigabytes": { - "type": "integer", - "minimum": -1 - } - } - } - - def __init__(self, clients): - self.clients = clients - - def update(self, tenant_id, **kwargs): - self.clients.cinder().quotas.update(tenant_id, **kwargs) - - def delete(self, tenant_id): - self.clients.cinder().quotas.delete(tenant_id) - - def get(self, tenant_id): - response = self.clients.cinder().quotas.get(tenant_id) - return dict([(k, getattr(response, k)) - for k in self.QUOTAS_SCHEMA["properties"]]) diff --git a/rally/plugins/openstack/context/quotas/designate_quotas.py b/rally/plugins/openstack/context/quotas/designate_quotas.py deleted file mode 100644 index b2c647d9b1..0000000000 --- a/rally/plugins/openstack/context/quotas/designate_quotas.py +++ /dev/null @@ -1,56 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -class DesignateQuotas(object): - """Management of Designate quotas.""" - - QUOTAS_SCHEMA = { - "type": "object", - "additionalProperties": False, - "properties": { - "domains": { - "type": "integer", - "minimum": 1 - }, - "domain_recordsets": { - "type": "integer", - "minimum": 1 - }, - "domain_records": { - "type": "integer", - "minimum": 1 - }, - "recordset_records": { - "type": "integer", - "minimum": 1 - }, - } - } - - def __init__(self, clients): - self.clients = clients - - def update(self, tenant_id, **kwargs): - self.clients.designate().quotas.update(tenant_id, kwargs) - - def delete(self, tenant_id): - self.clients.designate().quotas.reset(tenant_id) - - def get(self, tenant_id): - # NOTE(andreykurilin): we have broken designate jobs, so I can't check - # that this method is right :( - response = self.clients.designate().quotas.get(tenant_id) - return dict([(k, response.get(k)) - for k in self.QUOTAS_SCHEMA["properties"]]) diff --git a/rally/plugins/openstack/context/quotas/manila_quotas.py b/rally/plugins/openstack/context/quotas/manila_quotas.py deleted file mode 100644 index 0c36c45ea3..0000000000 --- a/rally/plugins/openstack/context/quotas/manila_quotas.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright 2015 Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - - -class ManilaQuotas(object): - """Management of Manila quotas.""" - - QUOTAS_SCHEMA = { - "type": "object", - "additionalProperties": False, - "properties": { - "shares": { - "type": "integer", - "minimum": -1 - }, - "gigabytes": { - "type": "integer", - "minimum": -1 - }, - "snapshots": { - "type": "integer", - "minimum": -1 - }, - "snapshot_gigabytes": { - "type": "integer", - "minimum": -1 - }, - "share_networks": { - "type": "integer", - "minimum": -1 - } - } - } - - def __init__(self, clients): - self.clients = clients - - def update(self, tenant_id, **kwargs): - self.clients.manila().quotas.update(tenant_id, **kwargs) - - def delete(self, tenant_id): - self.clients.manila().quotas.delete(tenant_id) - - def get(self, tenant_id): - response = self.clients.manila().quotas.get(tenant_id) - return dict([(k, getattr(response, k)) - for k in self.QUOTAS_SCHEMA["properties"]]) diff --git a/rally/plugins/openstack/context/quotas/neutron_quotas.py b/rally/plugins/openstack/context/quotas/neutron_quotas.py deleted file mode 100644 index f24d3a2bca..0000000000 --- a/rally/plugins/openstack/context/quotas/neutron_quotas.py +++ /dev/null @@ -1,78 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -class NeutronQuotas(object): - """Management of Neutron quotas.""" - - QUOTAS_SCHEMA = { - "type": "object", - "additionalProperties": False, - "properties": { - "network": { - "type": "integer", - "minimum": -1 - }, - "subnet": { - "type": "integer", - "minimum": -1 - }, - "port": { - "type": "integer", - "minimum": -1 - }, - "router": { - "type": "integer", - "minimum": -1 - }, - "floatingip": { - "type": "integer", - "minimum": -1 - }, - "security_group": { - "type": "integer", - "minimum": -1 - }, - "security_group_rule": { - "type": "integer", - "minimum": -1 - }, - "pool": { - "type": "integer", - "minimum": -1 - }, - "vip": { - "type": "integer", - "minimum": -1 - }, - "health_monitor": { - "type": "integer", - "minimum": -1 - } - } - } - - def __init__(self, clients): - self.clients = clients - - def update(self, tenant_id, **kwargs): - body = {"quota": kwargs} - self.clients.neutron().update_quota(tenant_id, body=body) - - def delete(self, tenant_id): - # Reset quotas to defaults and tag database objects as deleted - self.clients.neutron().delete_quota(tenant_id) - - def get(self, tenant_id): - return self.clients.neutron().show_quota(tenant_id)["quota"] diff --git a/rally/plugins/openstack/context/quotas/nova_quotas.py b/rally/plugins/openstack/context/quotas/nova_quotas.py deleted file mode 100644 index 065a3a7444..0000000000 --- a/rally/plugins/openstack/context/quotas/nova_quotas.py +++ /dev/null @@ -1,95 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -class NovaQuotas(object): - """Management of Nova quotas.""" - - QUOTAS_SCHEMA = { - "type": "object", - "additionalProperties": False, - "properties": { - "instances": { - "type": "integer", - "minimum": -1 - }, - "cores": { - "type": "integer", - "minimum": -1 - }, - "ram": { - "type": "integer", - "minimum": -1 - }, - "floating_ips": { - "type": "integer", - "minimum": -1 - }, - "fixed_ips": { - "type": "integer", - "minimum": -1 - }, - "metadata_items": { - "type": "integer", - "minimum": -1 - }, - "injected_files": { - "type": "integer", - "minimum": -1 - }, - "injected_file_content_bytes": { - "type": "integer", - "minimum": -1 - }, - "injected_file_path_bytes": { - "type": "integer", - "minimum": -1 - }, - "key_pairs": { - "type": "integer", - "minimum": -1 - }, - "security_groups": { - "type": "integer", - "minimum": -1 - }, - "security_group_rules": { - "type": "integer", - "minimum": -1 - }, - "server_groups": { - "type": "integer", - "minimum": -1 - }, - "server_group_members": { - "type": "integer", - "minimum": -1 - } - } - } - - def __init__(self, clients): - self.clients = clients - - def update(self, tenant_id, **kwargs): - self.clients.nova().quotas.update(tenant_id, **kwargs) - - def delete(self, tenant_id): - # Reset quotas to defaults and tag database objects as deleted - self.clients.nova().quotas.delete(tenant_id) - - def get(self, tenant_id): - response = self.clients.nova().quotas.get(tenant_id) - return dict([(k, getattr(response, k)) - for k in self.QUOTAS_SCHEMA["properties"]]) diff --git a/rally/plugins/openstack/context/quotas/quotas.py b/rally/plugins/openstack/context/quotas/quotas.py deleted file mode 100644 index 642dfa44b1..0000000000 --- a/rally/plugins/openstack/context/quotas/quotas.py +++ /dev/null @@ -1,108 +0,0 @@ -# Copyright 2014: Dassault Systemes -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
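The per-service quota helpers above (Cinder, Manila, Nova) share one pattern: the client returns a QuotaSet-like object, and get() flattens it into a plain dict keyed by the schema properties. A client-free sketch of that pattern (the stand-in response object and property list are illustrative):

class FakeQuotaSet(object):
    """Hypothetical stand-in for a client QuotaSet response."""
    instances = 10
    cores = 20
    ram = 51200

PROPERTIES = ["instances", "cores", "ram"]  # subset of the real schema keys

def quotas_to_dict(response, properties):
    # Same idea as the get() methods above: pull each schema property
    # off the response object into a plain dict.
    return {key: getattr(response, key) for key in properties}

print(quotas_to_dict(FakeQuotaSet(), PROPERTIES))
# {'instances': 10, 'cores': 20, 'ram': 51200}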
- -from rally.common import logging -from rally.common import validation -from rally import consts -from rally.plugins.openstack.context.quotas import cinder_quotas -from rally.plugins.openstack.context.quotas import designate_quotas -from rally.plugins.openstack.context.quotas import manila_quotas -from rally.plugins.openstack.context.quotas import neutron_quotas -from rally.plugins.openstack.context.quotas import nova_quotas -from rally.plugins.openstack import osclients -from rally.task import context - - -LOG = logging.getLogger(__name__) - - -@validation.add("required_platform", platform="openstack", admin=True) -@context.configure(name="quotas", platform="openstack", order=300) -class Quotas(context.Context): - """Sets OpenStack Tenants quotas.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "additionalProperties": False, - "properties": { - "nova": nova_quotas.NovaQuotas.QUOTAS_SCHEMA, - "cinder": cinder_quotas.CinderQuotas.QUOTAS_SCHEMA, - "manila": manila_quotas.ManilaQuotas.QUOTAS_SCHEMA, - "designate": designate_quotas.DesignateQuotas.QUOTAS_SCHEMA, - "neutron": neutron_quotas.NeutronQuotas.QUOTAS_SCHEMA - } - } - - def __init__(self, ctx): - super(Quotas, self).__init__(ctx) - self.clients = osclients.Clients( - self.context["admin"]["credential"], - api_info=self.context["config"].get("api_versions")) - - self.manager = { - "nova": nova_quotas.NovaQuotas(self.clients), - "cinder": cinder_quotas.CinderQuotas(self.clients), - "manila": manila_quotas.ManilaQuotas(self.clients), - "designate": designate_quotas.DesignateQuotas(self.clients), - "neutron": neutron_quotas.NeutronQuotas(self.clients) - } - self.original_quotas = [] - - def _service_has_quotas(self, service): - return len(self.config.get(service, {})) > 0 - - def setup(self): - for tenant_id in self.context["tenants"]: - for service in self.manager: - if self._service_has_quotas(service): - # NOTE(andreykurilin): in case of existing users it is - # required to restore original quotas instead of reset - # to default ones. 
- if "existing_users" in self.context: - self.original_quotas.append( - (service, tenant_id, - self.manager[service].get(tenant_id))) - self.manager[service].update(tenant_id, - **self.config[service]) - - def _restore_quotas(self): - for service, tenant_id, quotas in self.original_quotas: - try: - self.manager[service].update(tenant_id, **quotas) - except Exception as e: - LOG.warning("Failed to restore quotas for tenant %(tenant_id)s" - " in service %(service)s \n reason: %(exc)s" % - {"tenant_id": tenant_id, "service": service, - "exc": e}) - - def _delete_quotas(self): - for service in self.manager: - if self._service_has_quotas(service): - for tenant_id in self.context["tenants"]: - try: - self.manager[service].delete(tenant_id) - except Exception as e: - LOG.warning( - "Failed to remove quotas for tenant %(tenant)s " - "in service %(service)s reason: %(e)s" % - {"tenant": tenant_id, "service": service, "e": e}) - - def cleanup(self): - if self.original_quotas: - # existing users - self._restore_quotas() - else: - self._delete_quotas() diff --git a/rally/plugins/openstack/context/sahara/__init__.py b/rally/plugins/openstack/context/sahara/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/context/sahara/sahara_cluster.py b/rally/plugins/openstack/context/sahara/sahara_cluster.py deleted file mode 100644 index 9e28845161..0000000000 --- a/rally/plugins/openstack/context/sahara/sahara_cluster.py +++ /dev/null @@ -1,184 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from rally.common import cfg -from rally.common import utils as rutils -from rally.common import validation -from rally import consts -from rally import exceptions -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack.scenarios.sahara import utils -from rally.task import context -from rally.task import utils as bench_utils - - -CONF = cfg.CONF - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="sahara_cluster", platform="openstack", order=441) -class SaharaCluster(context.Context): - """Context class for setting up the Cluster an EDP job.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "plugin_name": { - "type": "string" - }, - "hadoop_version": { - "type": "string", - }, - "workers_count": { - "type": "integer", - "minimum": 1 - }, - "flavor_id": { - "type": "string", - }, - "master_flavor_id": { - "type": "string", - }, - "worker_flavor_id": { - "type": "string", - }, - "floating_ip_pool": { - "type": "string", - }, - "volumes_per_node": { - "type": "integer", - "minimum": 1 - }, - "volumes_size": { - "type": "integer", - "minimum": 1 - }, - "auto_security_group": { - "type": "boolean", - }, - "security_groups": { - "type": "array", - "items": { - "type": "string" - } - }, - "node_configs": { - "type": "object", - "additionalProperties": True - }, - "cluster_configs": { - "type": "object", - "additionalProperties": True - }, - "enable_anti_affinity": { - "type": "boolean" - }, - "enable_proxy": { - "type": "boolean" - }, - "use_autoconfig": { - "type": "boolean" - }, - }, - "additionalProperties": False, - "required": ["plugin_name", "hadoop_version", "workers_count", - "master_flavor_id", "worker_flavor_id"] - } - - def setup(self): - utils.init_sahara_context(self) - self.context["sahara"]["clusters"] = {} - - wait_dict = {} - - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - - image_id = self.context["tenants"][tenant_id]["sahara"]["image"] - - floating_ip_pool = self.config.get("floating_ip_pool") - - temporary_context = { - "user": user, - "tenant": self.context["tenants"][tenant_id], - "task": self.context["task"], - "owner_id": self.context["owner_id"] - } - scenario = utils.SaharaScenario(context=temporary_context) - - cluster = scenario._launch_cluster( - plugin_name=self.config["plugin_name"], - hadoop_version=self.config["hadoop_version"], - flavor_id=self.config.get("flavor_id"), - master_flavor_id=self.config["master_flavor_id"], - worker_flavor_id=self.config["worker_flavor_id"], - workers_count=self.config["workers_count"], - image_id=image_id, - floating_ip_pool=floating_ip_pool, - volumes_per_node=self.config.get("volumes_per_node"), - volumes_size=self.config.get("volumes_size", 1), - auto_security_group=self.config.get("auto_security_group", - True), - security_groups=self.config.get("security_groups"), - node_configs=self.config.get("node_configs"), - cluster_configs=self.config.get("cluster_configs"), - enable_anti_affinity=self.config.get("enable_anti_affinity", - False), - enable_proxy=self.config.get("enable_proxy", False), - wait_active=False, - use_autoconfig=self.config.get("use_autoconfig", True) - ) - - self.context["tenants"][tenant_id]["sahara"]["cluster"] = ( - cluster.id) - - # Need to save the client instance to poll for active status - wait_dict[cluster] = scenario.clients("sahara") - - bench_utils.wait_for( - resource=wait_dict, - update_resource=self.update_clusters_dict, 
- is_ready=self.all_clusters_active, - timeout=CONF.openstack.sahara_cluster_create_timeout, - check_interval=CONF.openstack.sahara_cluster_check_interval) - - def update_clusters_dict(self, dct): - new_dct = {} - for cluster, client in dct.items(): - new_cl = client.clusters.get(cluster.id) - new_dct[new_cl] = client - - return new_dct - - def all_clusters_active(self, dct): - for cluster, client in dct.items(): - cluster_status = cluster.status.lower() - if cluster_status == "error": - msg = ("Sahara cluster %(name)s has failed to" - " %(action)s. Reason: '%(reason)s'" - % {"name": cluster.name, "action": "start", - "reason": cluster.status_description}) - raise exceptions.ContextSetupFailure(ctx_name=self.get_name(), - msg=msg) - elif cluster_status != "active": - return False - return True - - def cleanup(self): - resource_manager.cleanup(names=["sahara.clusters"], - users=self.context.get("users", []), - superclass=utils.SaharaScenario, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/sahara/sahara_image.py b/rally/plugins/openstack/context/sahara/sahara_image.py deleted file mode 100644 index 9aa33feffd..0000000000 --- a/rally/plugins/openstack/context/sahara/sahara_image.py +++ /dev/null @@ -1,130 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
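The sahara_cluster context above waits for clusters by repeatedly re-fetching them and applying a readiness predicate that fails fast on an "error" status. The same predicate, stripped of the Sahara client so the statuses are passed in directly:

class ClusterError(Exception):
    pass

def all_clusters_active(statuses):
    """statuses: mapping of cluster name -> status string."""
    for name, status in statuses.items():
        status = status.lower()
        if status == "error":
            raise ClusterError("Sahara cluster %s failed to start" % name)
        if status != "active":
            return False
    return True

print(all_clusters_active({"c1": "Active", "c2": "Waiting"}))  # False
print(all_clusters_active({"c1": "Active", "c2": "Active"}))   # True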
- -from rally.common import utils as rutils -from rally.common import validation -from rally import consts -from rally import exceptions -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack import osclients -from rally.plugins.openstack.scenarios.sahara import utils -from rally.plugins.openstack.services.image import image as image_services -from rally.task import context - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="sahara_image", platform="openstack", order=440) -class SaharaImage(context.Context): - """Context class for adding and tagging Sahara images.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "image_uuid": { - "type": "string" - }, - "image_url": { - "type": "string", - }, - "username": { - "type": "string" - }, - "plugin_name": { - "type": "string", - }, - "hadoop_version": { - "type": "string", - } - }, - "oneOf": [ - {"description": "Create an image.", - "required": ["image_url", "username", "plugin_name", - "hadoop_version"]}, - {"description": "Use an existing image.", - "required": ["image_uuid"]} - ], - "additionalProperties": False - } - - def _create_image(self, hadoop_version, image_url, plugin_name, user, - user_name): - clients = osclients.Clients( - user["credential"], - api_info=self.context["config"].get("api_versions")) - image_service = image_services.Image( - clients, name_generator=self.generate_random_name) - image = image_service.create_image(container_format="bare", - image_location=image_url, - disk_format="qcow2") - clients.sahara().images.update_image( - image_id=image.id, user_name=user_name, desc="") - clients.sahara().images.update_tags( - image_id=image.id, new_tags=[plugin_name, hadoop_version]) - return image.id - - def setup(self): - utils.init_sahara_context(self) - self.context["sahara"]["images"] = {} - - # The user may want to use the existing image. In this case he should - # make sure that the image is public and has all required metadata. - image_uuid = self.config.get("image_uuid") - - self.context["sahara"]["need_image_cleanup"] = not image_uuid - - if image_uuid: - # Using the first user to check the existing image. 
- user = self.context["users"][0] - clients = osclients.Clients(user["credential"]) - - image = clients.glance().images.get(image_uuid) - - visibility = None - if hasattr(image, "is_public"): - visibility = "public" if image.is_public else "private" - else: - visibility = image["visibility"] - - if visibility != "public": - raise exceptions.ContextSetupFailure( - ctx_name=self.get_name(), - msg="Use only public image for sahara_image context" - ) - image_id = image_uuid - - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - self.context["tenants"][tenant_id]["sahara"]["image"] = ( - image_id) - else: - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - - image_id = self._create_image( - hadoop_version=self.config["hadoop_version"], - image_url=self.config["image_url"], - plugin_name=self.config["plugin_name"], - user=user, - user_name=self.config["username"]) - - self.context["tenants"][tenant_id]["sahara"]["image"] = ( - image_id) - - def cleanup(self): - if self.context["sahara"]["need_image_cleanup"]: - resource_manager.cleanup(names=["glance.images"], - users=self.context.get("users", []), - superclass=self.__class__, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/sahara/sahara_input_data_sources.py b/rally/plugins/openstack/context/sahara/sahara_input_data_sources.py deleted file mode 100644 index 6fc178c321..0000000000 --- a/rally/plugins/openstack/context/sahara/sahara_input_data_sources.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import requests -from six.moves.urllib import parse - -from rally.common import utils as rutils -from rally.common import validation -from rally import consts -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack import osclients -from rally.plugins.openstack.scenarios.sahara import utils -from rally.plugins.openstack.scenarios.swift import utils as swift_utils -from rally.task import context - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="sahara_input_data_sources", platform="openstack", - order=443) -class SaharaInputDataSources(context.Context): - """Context class for setting up Input Data Sources for an EDP job.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "input_type": { - "enum": ["swift", "hdfs"], - }, - "input_url": { - "type": "string", - }, - "swift_files": { - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "download_url": { - "type": "string" - } - }, - "additionalProperties": False, - "required": ["name", "download_url"] - } - } - }, - "additionalProperties": False, - "required": ["input_type", "input_url"] - } - - def setup(self): - utils.init_sahara_context(self) - self.context["sahara"]["swift_objects"] = [] - self.context["sahara"]["container_name"] = None - - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - clients = osclients.Clients(user["credential"]) - if self.config["input_type"] == "swift": - self.setup_inputs_swift(clients, tenant_id, - self.config["input_url"], - self.config["swift_files"], - user["credential"].username, - user["credential"].password) - else: - self.setup_inputs(clients, tenant_id, - self.config["input_type"], - self.config["input_url"]) - - def setup_inputs(self, clients, tenant_id, input_type, input_url): - input_ds = clients.sahara().data_sources.create( - name=self.generate_random_name(), - description="", - data_source_type=input_type, - url=input_url) - - self.context["tenants"][tenant_id]["sahara"]["input"] = input_ds.id - - def setup_inputs_swift(self, clients, tenant_id, input_url, - swift_files, username, password): - swift_scenario = swift_utils.SwiftScenario(clients=clients, - context=self.context) - # TODO(astudenov): use self.generate_random_name() - container_name = "rally_" + parse.urlparse(input_url).netloc.rstrip( - ".sahara") - self.context["sahara"]["container_name"] = ( - swift_scenario._create_container(container_name=container_name)) - for swift_file in swift_files: - content = requests.get(swift_file["download_url"]).content - self.context["sahara"]["swift_objects"].append( - swift_scenario._upload_object( - self.context["sahara"]["container_name"], content, - object_name=swift_file["name"])) - input_ds_swift = clients.sahara().data_sources.create( - name=self.generate_random_name(), description="", - data_source_type="swift", url=input_url, - credential_user=username, credential_pass=password) - - self.context["tenants"][tenant_id]["sahara"]["input"] = ( - input_ds_swift.id) - - def cleanup(self): - resource_manager.cleanup( - names=["swift.object", "swift.container"], - users=self.context.get("users", []), - superclass=swift_utils.SwiftScenario, - task_id=self.get_owner_id()) - resource_manager.cleanup( - names=["sahara.data_sources"], - users=self.context.get("users", []), - superclass=self.__class__, - task_id=self.get_owner_id()) diff --git 
a/rally/plugins/openstack/context/sahara/sahara_job_binaries.py b/rally/plugins/openstack/context/sahara/sahara_job_binaries.py deleted file mode 100644 index 4bd6bc39d1..0000000000 --- a/rally/plugins/openstack/context/sahara/sahara_job_binaries.py +++ /dev/null @@ -1,145 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import requests - -from rally.common import utils as rutils -from rally.common import validation -from rally import consts -from rally import exceptions -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack import osclients -from rally.plugins.openstack.scenarios.sahara import utils -from rally.task import context - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="sahara_job_binaries", platform="openstack", order=442) -class SaharaJobBinaries(context.Context): - """Context class for setting up Job Binaries for an EDP job.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "mains": { - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "download_url": { - "type": "string" - } - }, - "additionalProperties": False, - "required": ["name", "download_url"] - } - }, - "libs": { - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "download_url": { - "type": "string" - } - }, - "additionalProperties": False, - "required": ["name", "download_url"] - } - } - }, - "additionalProperties": False - } - - # This cache will hold the downloaded libs content to prevent repeated - # downloads for each tenant - lib_cache = {} - - def setup(self): - utils.init_sahara_context(self) - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - - clients = osclients.Clients(user["credential"]) - sahara = clients.sahara() - - self.context["tenants"][tenant_id]["sahara"]["mains"] = [] - self.context["tenants"][tenant_id]["sahara"]["libs"] = [] - - for main in self.config.get("mains", []): - self.download_and_save_lib( - sahara=sahara, - lib_type="mains", - name=main["name"], - download_url=main["download_url"], - tenant_id=tenant_id) - - for lib in self.config.get("libs", []): - self.download_and_save_lib( - sahara=sahara, - lib_type="libs", - name=lib["name"], - download_url=lib["download_url"], - tenant_id=tenant_id) - - def setup_inputs(self, sahara, tenant_id, input_type, input_url): - if input_type == "swift": - raise exceptions.RallyException( - "Swift Data Sources are not implemented yet") - # Todo(nkonovalov): Add swift credentials parameters and data upload - input_ds = sahara.data_sources.create( - name=self.generate_random_name(), - description="", - data_source_type=input_type, - url=input_url) - - self.context["tenants"][tenant_id]["sahara"]["input"] = input_ds.id - - def download_and_save_lib(self, sahara, lib_type, name, download_url, - tenant_id): - 
if download_url not in self.lib_cache: - lib_data = requests.get(download_url).content - self.lib_cache[download_url] = lib_data - else: - lib_data = self.lib_cache[download_url] - - job_binary_internal = sahara.job_binary_internals.create( - name=name, - data=lib_data) - - url = "internal-db://%s" % job_binary_internal.id - job_binary = sahara.job_binaries.create(name=name, - url=url, - description="", - extra={}) - - self.context["tenants"][tenant_id]["sahara"][lib_type].append( - job_binary.id) - - def cleanup(self): - resources = ["job_binary_internals", "job_binaries"] - - resource_manager.cleanup( - names=["sahara.%s" % res for res in resources], - users=self.context.get("users", []), - superclass=utils.SaharaScenario, - task_id=self.context["task"]["uuid"]) diff --git a/rally/plugins/openstack/context/sahara/sahara_output_data_sources.py b/rally/plugins/openstack/context/sahara/sahara_output_data_sources.py deleted file mode 100644 index 8e4367fbe9..0000000000 --- a/rally/plugins/openstack/context/sahara/sahara_output_data_sources.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import utils as rutils -from rally.common import validation -from rally import consts -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack import osclients -from rally.plugins.openstack.scenarios.sahara import utils -from rally.plugins.openstack.scenarios.swift import utils as swift_utils -from rally.task import context - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="sahara_output_data_sources", platform="openstack", - order=444) -class SaharaOutputDataSources(context.Context): - """Context class for setting up Output Data Sources for an EDP job.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "output_type": { - "enum": ["swift", "hdfs"], - }, - "output_url_prefix": { - "type": "string", - } - }, - "additionalProperties": False, - "required": ["output_type", "output_url_prefix"] - } - - def setup(self): - utils.init_sahara_context(self) - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - - clients = osclients.Clients(user["credential"]) - sahara = clients.sahara() - - if self.config["output_type"] == "swift": - swift = swift_utils.SwiftScenario(clients=clients, - context=self.context) - container_name = self.generate_random_name() - self.context["tenants"][tenant_id]["sahara"]["container"] = { - "name": swift._create_container( - container_name=container_name), - "output_swift_objects": [] - } - self.setup_outputs_swift(swift, sahara, tenant_id, - container_name, - user["credential"].username, - user["credential"].password) - else: - self.setup_outputs_hdfs(sahara, tenant_id, - self.config["output_url_prefix"]) - - def setup_outputs_hdfs(self, sahara, tenant_id, output_url): - output_ds = sahara.data_sources.create( - 
name=self.generate_random_name(), - description="", - data_source_type="hdfs", - url=output_url) - - self.context["tenants"][tenant_id]["sahara"]["output"] = output_ds.id - - def setup_outputs_swift(self, swift, sahara, tenant_id, container_name, - username, password): - output_ds_swift = sahara.data_sources.create( - name=self.generate_random_name(), - description="", - data_source_type="swift", - url="swift://" + container_name + ".sahara/", - credential_user=username, - credential_pass=password) - - self.context["tenants"][tenant_id]["sahara"]["output"] = ( - output_ds_swift.id - ) - - def cleanup(self): - resource_manager.cleanup( - names=["swift.object", "swift.container"], - users=self.context.get("users", []), - superclass=self.__class__, - task_id=self.get_owner_id()) - resource_manager.cleanup( - names=["sahara.data_sources"], - users=self.context.get("users", []), - superclass=self.__class__, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/context/senlin/__init__.py b/rally/plugins/openstack/context/senlin/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/context/senlin/profiles.py b/rally/plugins/openstack/context/senlin/profiles.py deleted file mode 100644 index c27c1802f4..0000000000 --- a/rally/plugins/openstack/context/senlin/profiles.py +++ /dev/null @@ -1,73 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
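The sahara_output_data_sources context removed above accepts exactly two options, both required by its CONFIG_SCHEMA. A minimal configuration, shown as the Python dict the context would receive (the HDFS URL is only an illustration), might look like:

    # Hypothetical task-level configuration for the context above;
    # "output_type" must be "swift" or "hdfs", and "output_url_prefix" is a
    # plain string (the URL below is illustrative only).
    contexts = {
        "sahara_output_data_sources": {
            "output_type": "hdfs",
            "output_url_prefix": "hdfs://namenode.example.org/rally-output"
        }
    }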
- -from rally.common import utils as rutils -from rally.common import validation -from rally import consts -from rally.plugins.openstack.scenarios.senlin import utils as senlin_utils -from rally.task import context - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="profiles", platform="openstack", order=190) -class ProfilesGenerator(context.Context): - """Context creates a temporary profile for Senlin test.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "type": { - "type": "string", - }, - "version": { - "type": "string", - }, - "properties": { - "type": "object", - "additionalProperties": True, - } - }, - "additionalProperties": False, - "required": ["type", "version", "properties"] - } - - def setup(self): - """Create test profiles.""" - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - - senlin_scenario = senlin_utils.SenlinScenario({ - "user": user, - "task": self.context["task"], - "config": { - "api_versions": self.context["config"].get( - "api_versions", [])} - }) - profile = senlin_scenario._create_profile(self.config) - - self.context["tenants"][tenant_id]["profile"] = profile.id - - def cleanup(self): - """Delete created test profiles.""" - for user, tenant_id in rutils.iterate_per_tenants( - self.context["users"]): - - senlin_scenario = senlin_utils.SenlinScenario({ - "user": user, - "task": self.context["task"], - "config": { - "api_versions": self.context["config"].get( - "api_versions", [])} - }) - senlin_scenario._delete_profile( - self.context["tenants"][tenant_id]["profile"]) diff --git a/rally/plugins/openstack/context/swift/__init__.py b/rally/plugins/openstack/context/swift/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/context/swift/objects.py b/rally/plugins/openstack/context/swift/objects.py deleted file mode 100644 index 6a6c82d5da..0000000000 --- a/rally/plugins/openstack/context/swift/objects.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright 2015: Cisco Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
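The profiles context above, like most contexts in this module, walks the users list with rutils.iterate_per_tenants. A simplified sketch of what that helper is assumed to do (yield one representative user per tenant) is:

    # Simplified sketch of the assumed behaviour of
    # rally.common.utils.iterate_per_tenants: yield one (user, tenant_id)
    # pair per tenant, using the first user seen for each tenant.
    def iterate_per_tenants(users):
        seen = set()
        for user in users:
            tenant_id = user["tenant_id"]
            if tenant_id not in seen:
                seen.add(tenant_id)
                yield user, tenant_id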
- -from rally.common import logging -from rally.common import validation -from rally import consts -from rally import exceptions -from rally.plugins.openstack.context.swift import utils as swift_utils -from rally.task import context - -LOG = logging.getLogger(__name__) - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="swift_objects", platform="openstack", order=360) -class SwiftObjectGenerator(swift_utils.SwiftObjectMixin, context.Context): - """Create containers and objects in each tenant.""" - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "containers_per_tenant": { - "type": "integer", - "minimum": 1 - }, - "objects_per_container": { - "type": "integer", - "minimum": 1 - }, - "object_size": { - "type": "integer", - "minimum": 1 - }, - "resource_management_workers": { - "type": "integer", - "minimum": 1 - } - }, - "additionalProperties": False - } - - DEFAULT_CONFIG = { - "containers_per_tenant": 1, - "objects_per_container": 1, - "object_size": 1024, - "resource_management_workers": 30 - } - - def setup(self): - """Create containers and objects, using the broker pattern.""" - threads = self.config["resource_management_workers"] - - containers_per_tenant = self.config["containers_per_tenant"] - containers_num = len(self.context["tenants"]) * containers_per_tenant - LOG.debug("Creating %d containers using %d threads." - % (containers_num, threads)) - containers_count = len(self._create_containers(self.context, - containers_per_tenant, - threads)) - if containers_count != containers_num: - raise exceptions.ContextSetupFailure( - ctx_name=self.get_name(), - msg="Failed to create the requested number of containers, " - "expected %(expected)s but got %(actual)s." - % {"expected": containers_num, "actual": containers_count}) - - objects_per_container = self.config["objects_per_container"] - objects_num = containers_num * objects_per_container - LOG.debug("Creating %d objects using %d threads." - % (objects_num, threads)) - objects_count = len(self._create_objects(self.context, - objects_per_container, - self.config["object_size"], - threads)) - if objects_count != objects_num: - raise exceptions.ContextSetupFailure( - ctx_name=self.get_name(), - msg="Failed to create the requested number of objects, " - "expected %(expected)s but got %(actual)s." - % {"expected": objects_num, "actual": objects_count}) - - def cleanup(self): - """Delete containers and objects, using the broker pattern.""" - threads = self.config["resource_management_workers"] - - self._delete_objects(self.context, threads) - self._delete_containers(self.context, threads) diff --git a/rally/plugins/openstack/context/swift/utils.py b/rally/plugins/openstack/context/swift/utils.py deleted file mode 100644 index 127c0a4adb..0000000000 --- a/rally/plugins/openstack/context/swift/utils.py +++ /dev/null @@ -1,151 +0,0 @@ -# Copyright 2015: Cisco Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
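The swift_objects context above fills any omitted option from DEFAULT_CONFIG; an explicit configuration (as the Python dict the context receives) equivalent to those defaults would be:

    # Explicit swift_objects configuration matching the DEFAULT_CONFIG shown
    # above: one container per tenant, one 1 KiB object per container,
    # created and cleaned up by 30 broker workers.
    contexts = {
        "swift_objects": {
            "containers_per_tenant": 1,
            "objects_per_container": 1,
            "object_size": 1024,
            "resource_management_workers": 30
        }
    }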
- -import tempfile - -from rally.common import broker -from rally.common import utils as rutils -from rally.plugins.openstack.scenarios.swift import utils as swift_utils - - -class SwiftObjectMixin(object): - """Mix-in method for Swift Object Context.""" - - def _create_containers(self, context, containers_per_tenant, threads): - """Create containers and store results in Rally context. - - :param context: dict, Rally context environment - :param containers_per_tenant: int, number of containers to create - per tenant - :param threads: int, number of threads to use for broker pattern - - :returns: list of tuples containing (account, container) - """ - containers = [] - - def publish(queue): - for user, tenant_id in (rutils.iterate_per_tenants( - context.get("users", []))): - context["tenants"][tenant_id]["containers"] = [] - for i in range(containers_per_tenant): - args = (user, context["tenants"][tenant_id]["containers"]) - queue.append(args) - - def consume(cache, args): - user, tenant_containers = args - if user["id"] not in cache: - cache[user["id"]] = swift_utils.SwiftScenario( - {"user": user, "task": context.get("task", {})}) - container_name = cache[user["id"]]._create_container() - tenant_containers.append({"user": user, - "container": container_name, - "objects": []}) - containers.append((user["tenant_id"], container_name)) - - broker.run(publish, consume, threads) - - return containers - - def _create_objects(self, context, objects_per_container, object_size, - threads): - """Create objects and store results in Rally context. - - :param context: dict, Rally context environment - :param objects_per_container: int, number of objects to create - per container - :param object_size: int, size of created swift objects in byte - :param threads: int, number of threads to use for broker pattern - - :returns: list of tuples containing (account, container, object) - """ - objects = [] - - with tempfile.TemporaryFile() as dummy_file: - # set dummy file to specified object size - dummy_file.truncate(object_size) - - def publish(queue): - for tenant_id in context["tenants"]: - containers = context["tenants"][tenant_id]["containers"] - for container in containers: - for i in range(objects_per_container): - queue.append(container) - - def consume(cache, container): - user = container["user"] - if user["id"] not in cache: - cache[user["id"]] = swift_utils.SwiftScenario( - {"user": user, "task": context.get("task", {})}) - dummy_file.seek(0) - object_name = cache[user["id"]]._upload_object( - container["container"], - dummy_file)[1] - container["objects"].append(object_name) - objects.append((user["tenant_id"], container["container"], - object_name)) - - broker.run(publish, consume, threads) - - return objects - - def _delete_containers(self, context, threads): - """Delete containers created by Swift context and update Rally context. 
- - :param context: dict, Rally context environment - :param threads: int, number of threads to use for broker pattern - """ - def publish(queue): - for tenant_id in context["tenants"]: - containers = context["tenants"][tenant_id]["containers"] - for container in containers[:]: - args = container, containers - queue.append(args) - - def consume(cache, args): - container, tenant_containers = args - user = container["user"] - if user["id"] not in cache: - cache[user["id"]] = swift_utils.SwiftScenario( - {"user": user, "task": context.get("task", {})}) - cache[user["id"]]._delete_container(container["container"]) - tenant_containers.remove(container) - - broker.run(publish, consume, threads) - - def _delete_objects(self, context, threads): - """Delete objects created by Swift context and update Rally context. - - :param context: dict, Rally context environment - :param threads: int, number of threads to use for broker pattern - """ - def publish(queue): - for tenant_id in context["tenants"]: - containers = context["tenants"][tenant_id]["containers"] - for container in containers: - for object_name in container["objects"][:]: - args = object_name, container - queue.append(args) - - def consume(cache, args): - object_name, container = args - user = container["user"] - if user["id"] not in cache: - cache[user["id"]] = swift_utils.SwiftScenario( - {"user": user, "task": context.get("task", {})}) - cache[user["id"]]._delete_object(container["container"], - object_name) - container["objects"].remove(object_name) - - broker.run(publish, consume, threads) diff --git a/rally/plugins/openstack/context/vm/__init__.py b/rally/plugins/openstack/context/vm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/context/vm/custom_image.py b/rally/plugins/openstack/context/vm/custom_image.py deleted file mode 100644 index 8549d67c47..0000000000 --- a/rally/plugins/openstack/context/vm/custom_image.py +++ /dev/null @@ -1,230 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc - -import six - -from rally.common import broker -from rally.common import logging -from rally.common import utils -from rally import consts -from rally.plugins.openstack import osclients -from rally.plugins.openstack.scenarios.vm import vmtasks -from rally.plugins.openstack.services.image import image -from rally.plugins.openstack import types -from rally.task import context - -LOG = logging.getLogger(__name__) - - -@six.add_metaclass(abc.ABCMeta) -class BaseCustomImageGenerator(context.Context): - """Base plugin for the contexts providing customized image with. - - Every context plugin for the specific customization must implement - the method `_customize_image` that is able to connect to the server - using SSH and install applications inside it. - - This base context plugin provides a way to prepare an image with - custom preinstalled applications. 
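The _create_*/_delete_* helpers above all follow the same publish/consume shape that rally.common.broker.run(publish, consume, threads) drives. A self-contained sketch of that pattern (an illustration, not the real broker module) is:

    # Self-contained sketch of the publish/consume pattern used above; this
    # is an illustration only, not the actual rally.common.broker code.
    import collections
    import threading

    def run(publish, consume, workers):
        queue = collections.deque()
        publish(queue)                 # producer fills the queue up front

        def worker():
            cache = {}                 # per-worker cache, e.g. of scenario objects
            while True:
                try:
                    args = queue.popleft()
                except IndexError:
                    return
                consume(cache, args)

        threads = [threading.Thread(target=worker) for _ in range(workers)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()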
Basically, this code boots a VM, calls - the `_customize_image` and then snapshots the VM disk, removing the VM - afterwards. The image UUID is stored in the user["custom_image"]["id"] - and can be used afterwards by scenario. - """ - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "image": { - "type": "object", - "properties": { - "name": { - "type": "string" - } - }, - "additionalProperties": False - }, - "flavor": { - "type": "object", - "properties": { - "name": { - "type": "string" - } - }, - "additionalProperties": False - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "floating_network": { - "type": "string" - }, - "internal_network": { - "type": "string" - }, - "port": { - "type": "integer", - "minimum": 1, - "maximum": 65535 - }, - "userdata": { - "type": "string" - }, - "workers": { - "type": "integer", - "minimum": 1, - } - }, - "required": ["image", "flavor"], - "additionalProperties": False - } - - DEFAULT_CONFIG = { - "username": "root", - "port": 22, - "workers": 1 - } - - def setup(self): - """Creates custom image(s) with preinstalled applications. - - When admin is present creates one public image that is usable - from all the tenants and users. Otherwise create one image - per user and tenant. - """ - - if "admin" in self.context: - if self.context["users"]: - # NOTE(pboldin): Create by first user and make it public by - # the admin - user = self.context["users"][0] - else: - user = self.context["admin"] - tenant = self.context["tenants"][user["tenant_id"]] - - nics = None - if "networks" in tenant: - nics = [{"net-id": tenant["networks"][0]["id"]}] - - custom_image = self.create_one_image(user, nics=nics) - glance_service = image.Image( - self.context["admin"]["credential"].clients()) - glance_service.set_visibility(custom_image.id) - - for tenant in self.context["tenants"].values(): - tenant["custom_image"] = custom_image - else: - def publish(queue): - users = self.context.get("users", []) - for user, tenant_id in utils.iterate_per_tenants(users): - queue.append((user, tenant_id)) - - def consume(cache, args): - user, tenant_id = args - tenant = self.context["tenants"][tenant_id] - tenant["custom_image"] = self.create_one_image(user) - - broker.run(publish, consume, self.config["workers"]) - - def create_one_image(self, user, **kwargs): - """Create one image for the user.""" - - clients = osclients.Clients(user["credential"]) - - image_id = types.GlanceImage(self.context).pre_process( - resource_spec=self.config["image"], config={}) - flavor_id = types.Flavor(self.context).pre_process( - resource_spec=self.config["flavor"], config={}) - - vm_scenario = vmtasks.BootRuncommandDelete(self.context, - clients=clients) - - server, fip = vm_scenario._boot_server_with_fip( - image=image_id, flavor=flavor_id, - floating_network=self.config.get("floating_network"), - userdata=self.config.get("userdata"), - key_name=user["keypair"]["name"], - security_groups=[user["secgroup"]["name"]], - **kwargs) - - try: - LOG.debug("Installing tools on %r %s" % (server, fip["ip"])) - self.customize_image(server, fip, user) - - LOG.debug("Stopping server %r" % server) - vm_scenario._stop_server(server) - - LOG.debug("Creating snapshot for %r" % server) - custom_image = vm_scenario._create_image(server) - finally: - vm_scenario._delete_server_with_fip(server, fip) - - return custom_image - - def cleanup(self): - """Delete created custom image(s).""" - - if "admin" in self.context: - user = self.context["users"][0] 
- tenant = self.context["tenants"][user["tenant_id"]] - if "custom_image" in tenant: - self.delete_one_image(user, tenant["custom_image"]) - tenant.pop("custom_image") - else: - def publish(queue): - users = self.context.get("users", []) - for user, tenant_id in utils.iterate_per_tenants(users): - queue.append((user, tenant_id)) - - def consume(cache, args): - user, tenant_id = args - tenant = self.context["tenants"][tenant_id] - if "custom_image" in tenant: - self.delete_one_image(user, tenant["custom_image"]) - tenant.pop("custom_image") - - broker.run(publish, consume, self.config["workers"]) - - def delete_one_image(self, user, custom_image): - """Delete the image created for the user and tenant.""" - - with logging.ExceptionLogger( - LOG, "Unable to delete image %s" % custom_image.id): - - glance_service = image.Image(user["credential"].clients()) - glance_service.delete_image(custom_image.id) - - @logging.log_task_wrapper(LOG.info, "Custom image context: customizing") - def customize_image(self, server, ip, user): - return self._customize_image(server, ip, user) - - @abc.abstractmethod - def _customize_image(self, server, ip, user): - """Override this method with one that customizes image. - - Basically, code can simply call `VMScenario._run_command` function - specifying an installation script and interpreter. This script will - be then executed using SSH. - - :param server: nova.Server instance - :param ip: dict with server IP details - :param user: user who started a VM instance. Used to extract keypair - """ - pass diff --git a/rally/plugins/openstack/context/vm/image_command_customizer.py b/rally/plugins/openstack/context/vm/image_command_customizer.py deleted file mode 100644 index 1d007a8002..0000000000 --- a/rally/plugins/openstack/context/vm/image_command_customizer.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy - -from rally.common import validation -from rally import exceptions -from rally.plugins.openstack.context.vm import custom_image -from rally.plugins.openstack.scenarios.vm import utils as vm_utils -import rally.task.context as context - - -@validation.add("required_platform", platform="openstack", users=True) -@context.configure(name="image_command_customizer", platform="openstack", - order=501) -class ImageCommandCustomizerContext(custom_image.BaseCustomImageGenerator): - """Context class for generating image customized by a command execution. - - Run a command specified by configuration to prepare image. - - Use this script e.g. to download and install something. 
- """ - - CONFIG_SCHEMA = copy.deepcopy( - custom_image.BaseCustomImageGenerator.CONFIG_SCHEMA) - CONFIG_SCHEMA["definitions"] = { - "stringOrStringList": { - "anyOf": [ - {"type": "string", "description": "just a string"}, - { - "type": "array", "description": "just a list of strings", - "items": {"type": "string"} - } - ] - }, - "scriptFile": { - "type": "object", - "properties": { - "script_file": {"$ref": "#/definitions/stringOrStringList"}, - "interpreter": {"$ref": "#/definitions/stringOrStringList"}, - "command_args": {"$ref": "#/definitions/stringOrStringList"} - }, - "required": ["script_file", "interpreter"], - "additionalProperties": False, - }, - "scriptInline": { - "type": "object", - "properties": { - "script_inline": {"type": "string"}, - "interpreter": {"$ref": "#/definitions/stringOrStringList"}, - "command_args": {"$ref": "#/definitions/stringOrStringList"} - }, - "required": ["script_inline", "interpreter"], - "additionalProperties": False, - }, - "commandPath": { - "type": "object", - "properties": { - "remote_path": {"$ref": "#/definitions/stringOrStringList"}, - "local_path": {"type": "string"}, - "command_args": {"$ref": "#/definitions/stringOrStringList"} - }, - "required": ["remote_path"], - "additionalProperties": False, - }, - "commandDict": { - "oneOf": [ - {"$ref": "#/definitions/scriptFile"}, - {"$ref": "#/definitions/scriptInline"}, - {"$ref": "#/definitions/commandPath"}, - ], - } - } - CONFIG_SCHEMA["properties"]["command"] = { - "$ref": "#/definitions/commandDict" - } - - def _customize_image(self, server, fip, user): - code, out, err = vm_utils.VMScenario(self.context)._run_command( - fip["ip"], self.config["port"], - self.config["username"], self.config.get("password"), - command=self.config["command"], - pkey=user["keypair"]["private"]) - - if code: - raise exceptions.ScriptError( - message="Command `%(command)s' execution failed," - " code %(code)d:\n" - "STDOUT:\n============================\n" - "%(out)s\n" - "STDERR:\n============================\n" - "%(err)s\n" - "============================\n" - % {"command": self.config["command"], "code": code, - "out": out, "err": err}) - - return code, out, err diff --git a/rally/plugins/openstack/context/watcher/__init__.py b/rally/plugins/openstack/context/watcher/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/context/watcher/audit_templates.py b/rally/plugins/openstack/context/watcher/audit_templates.py deleted file mode 100644 index 9f68d8c39b..0000000000 --- a/rally/plugins/openstack/context/watcher/audit_templates.py +++ /dev/null @@ -1,108 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import random - -import six - -from rally.common import validation -from rally import consts -from rally.plugins.openstack.cleanup import manager as resource_manager -from rally.plugins.openstack.scenarios.watcher import utils as watcher_utils -from rally.plugins.openstack import types -from rally.task import context - - -@validation.add("required_platform", platform="openstack", admin=True) -@context.configure(name="audit_templates", platform="openstack", order=550) -class AuditTemplateGenerator(context.Context): - """Creates Watcher audit templates for tenants.""" - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "audit_templates_per_admin": {"type": "integer", "minimum": 1}, - "fill_strategy": {"enum": ["round_robin", "random", None]}, - "params": { - "type": "array", - "minItems": 1, - "uniqueItems": True, - "items": { - "type": "object", - "properties": { - "goal": { - "type": "object", - "properties": { - "name": { - "type": "string" - } - }, - "additionalProperties": False - }, - "strategy": { - "type": "object", - "properties": { - "name": { - "type": "string" - } - }, - "additionalProperties": False - }, - }, - "additionalProperties": False, - }, - } - }, - "additionalProperties": False, - "required": ["params"] - } - - DEFAULT_CONFIG = { - "audit_templates_per_admin": 1, - "fill_strategy": "round_robin" - } - - def setup(self): - watcher_scenario = watcher_utils.WatcherScenario( - {"admin": self.context["admin"], "task": self.context["task"], - "owner_id": self.context["owner_id"], - "config": { - "api_versions": self.context["config"].get( - "api_versions", [])} - }) - - self.context["audit_templates"] = [] - for i in six.moves.range(self.config["audit_templates_per_admin"]): - cfg_size = len(self.config["params"]) - if self.config["fill_strategy"] == "round_robin": - audit_params = self.config["params"][i % cfg_size] - elif self.config["fill_strategy"] == "random": - audit_params = random.choice(self.config["params"]) - - goal_id = types.WatcherGoal(self.context).pre_process( - resource_spec=audit_params["goal"], config={}) - strategy_id = types.WatcherStrategy(self.context).pre_process( - resource_spec=audit_params["strategy"], config={}) - - audit_template = watcher_scenario._create_audit_template( - goal_id, strategy_id) - self.context["audit_templates"].append(audit_template.uuid) - - def cleanup(self): - resource_manager.cleanup(names=["watcher.action_plan", - "watcher.audit_template"], - admin=self.context.get("admin", []), - superclass=watcher_utils.WatcherScenario, - task_id=self.get_owner_id()) diff --git a/rally/plugins/openstack/credential.py b/rally/plugins/openstack/credential.py deleted file mode 100644 index 189b8e7bbe..0000000000 --- a/rally/plugins/openstack/credential.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright 2017: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
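The audit_templates context above picks parameters for each template according to fill_strategy; the selection logic it applies reduces to:

    import random

    def pick_params(params, index, fill_strategy):
        # Mirrors the selection done in AuditTemplateGenerator.setup() above.
        if fill_strategy == "round_robin":
            return params[index % len(params)]
        elif fill_strategy == "random":
            return random.choice(params)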
- -from rally.common import logging - -LOG = logging.getLogger(__file__) - - -class OpenStackCredential(dict): - """Credential for OpenStack.""" - - def __init__(self, auth_url, username, password, tenant_name=None, - project_name=None, - permission=None, - region_name=None, endpoint_type=None, - domain_name=None, endpoint=None, user_domain_name=None, - project_domain_name=None, - https_insecure=False, https_cacert=None, - profiler_hmac_key=None, profiler_conn_str=None, **kwargs): - if kwargs: - raise TypeError("%s" % kwargs) - - # TODO(andreykurilin): deprecate permission and endpoint - - super(OpenStackCredential, self).__init__([ - ("auth_url", auth_url), - ("username", username), - ("password", password), - ("tenant_name", (tenant_name or project_name)), - ("permission", permission), - ("endpoint", endpoint), - ("region_name", region_name), - ("endpoint_type", endpoint_type), - ("domain_name", domain_name), - ("user_domain_name", user_domain_name), - ("project_domain_name", project_domain_name), - ("https_insecure", https_insecure), - ("https_cacert", https_cacert), - ("profiler_hmac_key", profiler_hmac_key), - ("profiler_conn_str", profiler_conn_str) - ]) - - self._clients_cache = {} - - def __getattr__(self, attr, default=None): - # TODO(andreykurilin): print warning to force everyone to use this - # object as raw dict as soon as we clean over code. - return self.get(attr, default) - - # backward compatibility - @property - def insecure(self): - LOG.warning("Property 'insecure' is deprecated since Rally 0.10.0. " - "Use 'https_insecure' instead.") - return self["https_insecure"] - - # backward compatibility - @property - def cacert(self): - LOG.warning("Property 'cacert' is deprecated since Rally 0.10.0. " - "Use 'https_cacert' instead.") - return self["https_cacert"] - - def to_dict(self): - return dict(self) - - def list_services(self): - LOG.warning("Method `list_services` of OpenStackCredentials is " - "deprecated since Rally 0.11.0. Use osclients instead.") - return sorted([{"type": stype, "name": sname} - for stype, sname in self.clients().services().items()], - key=lambda s: s["name"]) - - # this method is mostly used by validation step. let's refactor it and - # deprecated this - def clients(self, api_info=None): - from rally.plugins.openstack import osclients - - return osclients.Clients(self, api_info=api_info, - cache=self._clients_cache) - - def __deepcopy__(self, memodict=None): - import copy - return self.__class__(**copy.deepcopy(self.to_dict())) diff --git a/rally/plugins/openstack/embedcharts/__init__.py b/rally/plugins/openstack/embedcharts/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/embedcharts/osprofilerchart.py b/rally/plugins/openstack/embedcharts/osprofilerchart.py deleted file mode 100644 index 0590f73c57..0000000000 --- a/rally/plugins/openstack/embedcharts/osprofilerchart.py +++ /dev/null @@ -1,88 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
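Because OpenStackCredential above subclasses dict and resolves missing attributes through __getattr__, the same field can be read either way; a small usage sketch (all endpoint values are placeholders) is:

    # Usage sketch for the credential class above; all values are placeholders.
    from rally.plugins.openstack.credential import OpenStackCredential

    cred = OpenStackCredential(auth_url="https://keystone.example.org:5000/v3",
                               username="demo",
                               password="secret",
                               project_name="demo")

    assert cred["username"] == "demo"    # plain dict access
    assert cred.username == "demo"       # attribute access via __getattr__
    assert cred.tenant_name == "demo"    # project_name is stored as tenant_name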
- -import json -import os -from rally.common import logging -from rally.common.plugin import plugin -from rally.task.processing.charts import OutputTextArea - -LOG = logging.getLogger(__name__) - - -def _datetime_json_serialize(obj): - if hasattr(obj, "isoformat"): - return obj.isoformat() - else: - return obj - - -@plugin.configure(name="OSProfiler") -class OSProfilerChart(OutputTextArea): - """osprofiler content - - This plugin complete data of osprofiler - - """ - - widget = "OSProfiler" - - @classmethod - def get_osprofiler_data(cls, data): - - from osprofiler import cmd - from osprofiler.drivers import base - - try: - engine = base.get_driver(data["data"]["conn_str"]) - except Exception: - if logging.is_debug(): - LOG.exception("Error while fetching OSProfiler results.") - return None - - data["widget"] = "EmbedChart" - data["title"] = "{0} : {1}".format(data["title"], - data["data"]["trace_id"][0]) - - path = "%s/template.html" % os.path.dirname(cmd.__file__) - with open(path) as f: - html_obj = f.read() - - osp_data = engine.get_report(data["data"]["trace_id"][0]) - osp_data = json.dumps(osp_data, - indent=4, - separators=(",", ": "), - default=_datetime_json_serialize) - data["data"] = html_obj.replace("$DATA", osp_data) - data["data"] = data["data"].replace("$LOCAL", "false") - - # NOTE(chenxu): self._data will be passed to - # ["complete_output"]["data"] as a whole string and - # tag will be parsed incorrectly in javascript string - # so we turn it to <\/script> and turn it back in javascript. - data["data"] = data["data"].replace("/script>", "\/script>") - - return {"title": data["title"], - "widget": data["widget"], - "data": data["data"]} - - @classmethod - def render_complete_data(cls, data): - if data["data"].get("conn_str"): - result = cls.get_osprofiler_data(data) - if result: - return result - return {"title": data["title"], - "widget": "TextArea", - "data": data["data"]["trace_id"]} diff --git a/rally/plugins/openstack/hook/__init__.py b/rally/plugins/openstack/hook/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/hook/fault_injection.py b/rally/plugins/openstack/hook/fault_injection.py deleted file mode 100644 index 20fa20f724..0000000000 --- a/rally/plugins/openstack/hook/fault_injection.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright 2016: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import logging -from rally.common import objects -from rally import consts -from rally.task import hook - -LOG = logging.getLogger(__name__) - - -@hook.configure(name="fault_injection", platform="openstack") -class FaultInjectionHook(hook.Hook): - """Performs fault injection using os-faults library. - - Configuration: - - * action - string that represents an action (more info in [1]) - * verify - whether to verify connection to cloud nodes or not - - This plugin discovers extra config of ExistingCloud - and looks for "cloud_config" field. 
If cloud_config is present then - it will be used to connect to the cloud by os-faults. - - Another option is to provide os-faults config file through - OS_FAULTS_CONFIG env variable. Format of the config can - be found in [1]. - - [1] http://os-faults.readthedocs.io/en/latest/usage.html - """ - - CONFIG_SCHEMA = { - "type": "object", - "$schema": consts.JSON_SCHEMA, - "properties": { - "action": {"type": "string"}, - "verify": {"type": "boolean"}, - }, - "required": [ - "action", - ], - "additionalProperties": False, - } - - def get_cloud_config(self): - deployment = objects.Deployment.get(self.task["deployment_uuid"]) - deployment_config = deployment["config"] - if deployment_config["type"] != "ExistingCloud": - return None - - extra_config = deployment_config.get("extra", {}) - return extra_config.get("cloud_config") - - def run(self): - import os_faults - - # get cloud configuration - cloud_config = self.get_cloud_config() - - # connect to the cloud - injector = os_faults.connect(cloud_config) - - # verify that all nodes are available - if self.config.get("verify"): - injector.verify() - - LOG.debug("Injecting fault: %s" % self.config["action"]) - os_faults.human_api(injector, self.config["action"]) diff --git a/rally/plugins/openstack/osclients.py b/rally/plugins/openstack/osclients.py deleted file mode 100644 index c781258d69..0000000000 --- a/rally/plugins/openstack/osclients.py +++ /dev/null @@ -1,845 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc -import os - -from six.moves.urllib import parse - -from rally.cli import envutils -from rally.common import cfg -from rally.common import logging -from rally.common.plugin import plugin -from rally.common import utils -from rally import consts -from rally import exceptions - - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - - -def configure(name, default_version=None, default_service_type=None, - supported_versions=None): - """OpenStack client class wrapper. - - Each client class has to be wrapped by configure() wrapper. It - sets essential configuration of client classes. - - :param name: Name of the client - :param default_version: Default version for client - :param default_service_type: Default service type of endpoint(If this - variable is not specified, validation will assume that your client - doesn't allow to specify service type. - :param supported_versions: List of supported versions(If this variable is - not specified, `OSClients.validate_version` method will raise an - exception that client doesn't support setting any versions. 
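For the fault_injection hook shown earlier, the whole configuration is the two keys of its CONFIG_SCHEMA; a representative hook config (the action string is a hypothetical os-faults command) is:

    # Representative fault_injection hook configuration; the action string is
    # a hypothetical os-faults command, and "verify": True makes the hook call
    # injector.verify() before the fault is injected.
    hook_config = {
        "action": "restart keystone service",
        "verify": True
    }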
If this - logic is wrong for your client, you should override `validate_version` - in client object) - """ - def wrapper(cls): - cls = plugin.configure(name=name, platform="openstack")(cls) - cls._meta_set("default_version", default_version) - cls._meta_set("default_service_type", default_service_type) - cls._meta_set("supported_versions", supported_versions or []) - return cls - - return wrapper - - -@plugin.base() -class OSClient(plugin.Plugin): - """Base class for openstack clients""" - - def __init__(self, credential, api_info, cache_obj): - self.credential = credential - if isinstance(self.credential, dict): - self.credential = utils.Struct(**self.credential) - self.api_info = api_info - self.cache = cache_obj - - def choose_version(self, version=None): - """Return version string. - - Choose version between transmitted(preferable value if present), - version from api_info(configured from a context) and default. - """ - # NOTE(andreykurilin): The result of choose is converted to string, - # since most of clients contain map for versioned modules, where a key - # is a string value of version. Example of map and its usage: - # - # from oslo_utils import importutils - # ... - # version_map = {"1": "someclient.v1.client.Client", - # "2": "someclient.v2.client.Client"} - # - # def Client(version, *args, **kwargs): - # cls = importutils.import_class(version_map[version]) - # return cls(*args, **kwargs) - # - # That is why type of version so important and we should ensure that - # version is a string object. - # For those clients which doesn't accept string value(for example - # zaqarclient), this method should be overridden. - version = (version or - self.api_info.get(self.get_name(), {}).get("version") or - self._meta_get("default_version")) - if version is not None: - version = str(version) - return version - - @classmethod - def get_supported_versions(cls): - return cls._meta_get("supported_versions") - - @classmethod - def validate_version(cls, version): - supported_versions = cls.get_supported_versions() - if supported_versions: - if str(version) not in supported_versions: - raise exceptions.ValidationError( - "'%(vers)s' is not supported. Should be one of " - "'%(supported)s'" - % {"vers": version, "supported": supported_versions}) - else: - raise exceptions.RallyException("Setting version is not supported") - try: - float(version) - except ValueError: - raise exceptions.ValidationError( - "'%s' is invalid. Should be numeric value." % version) - - def choose_service_type(self, service_type=None): - """Return service_type string. - - Choose service type between transmitted(preferable value if present), - service type from api_info(configured from a context) and default. - """ - return (service_type or - self.api_info.get(self.get_name(), {}).get("service_type") or - self._meta_get("default_service_type")) - - @classmethod - def is_service_type_configurable(cls): - """Just checks that client supports setting service type.""" - if cls._meta_get("default_service_type") is None: - raise exceptions.RallyException( - "Setting service type is not supported.") - - @property - def keystone(self): - return OSClient.get("keystone")(self.credential, self.api_info, - self.cache) - - def _get_session(self, auth_url=None, version=None): - LOG.warning( - "Method `rally.osclient.OSClient._get_session` is deprecated since" - " Rally 0.6.0. 
Use " - "`rally.osclient.OSClient.keystone.get_session` instead.") - return self.keystone.get_session(version) - - def _get_endpoint(self, service_type=None): - kw = {"service_type": self.choose_service_type(service_type), - "region_name": self.credential.region_name} - if self.credential.endpoint_type: - kw["interface"] = self.credential.endpoint_type - api_url = self.keystone.service_catalog.url_for(**kw) - return api_url - - def _get_auth_info(self, user_key="username", - password_key="password", - auth_url_key="auth_url", - project_name_key="project_id", - domain_name_key="domain_name", - user_domain_name_key="user_domain_name", - project_domain_name_key="project_domain_name", - cacert_key="cacert", - endpoint_type="endpoint_type", - ): - kw = { - user_key: self.credential.username, - password_key: self.credential.password, - auth_url_key: self.credential.auth_url, - cacert_key: self.credential.https_cacert, - } - if project_name_key: - kw.update({project_name_key: self.credential.tenant_name}) - - if "v2.0" not in self.credential.auth_url: - kw.update({ - domain_name_key: self.credential.domain_name}) - kw.update({ - user_domain_name_key: - self.credential.user_domain_name or "Default"}) - kw.update({ - project_domain_name_key: - self.credential.project_domain_name or "Default"}) - if self.credential.endpoint_type: - kw[endpoint_type] = self.credential.endpoint_type - return kw - - @abc.abstractmethod - def create_client(self, *args, **kwargs): - """Create new instance of client.""" - - def __call__(self, *args, **kwargs): - """Return initialized client instance.""" - key = "{0}{1}{2}".format(self.get_name(), - str(args) if args else "", - str(kwargs) if kwargs else "") - if key not in self.cache: - self.cache[key] = self.create_client(*args, **kwargs) - return self.cache[key] - - @classmethod - def get(cls, name, **kwargs): - # NOTE(boris-42): Remove this after we finish rename refactoring. - kwargs.pop("platform", None) - kwargs.pop("namespace", None) - return super(OSClient, cls).get(name, platform="openstack", **kwargs) - - -@configure("keystone", supported_versions=("2", "3")) -class Keystone(OSClient): - """Wrapper for KeystoneClient which hides OpenStack auth details.""" - - @property - def keystone(self): - raise exceptions.RallyException( - "Method 'keystone' is restricted for keystoneclient. 
:)") - - @property - def service_catalog(self): - return self.auth_ref.service_catalog - - @property - def auth_ref(self): - try: - if "keystone_auth_ref" not in self.cache: - sess, plugin = self.get_session() - self.cache["keystone_auth_ref"] = plugin.get_access(sess) - except Exception as e: - if logging.is_debug(): - LOG.exception("Unable to authenticate for user" - " %(username)s in project" - " %(tenant_name)s" % - {"username": self.credential.username, - "tenant_name": self.credential.tenant_name}) - raise exceptions.AuthenticationFailed( - username=self.credential.username, - project=self.credential.tenant_name, - url=self.credential.auth_url, - etype=e.__class__.__name__, - error=str(e)) - return self.cache["keystone_auth_ref"] - - def get_session(self, version=None): - key = "keystone_session_and_plugin_%s" % version - if key not in self.cache: - from keystoneauth1 import discover - from keystoneauth1 import identity - from keystoneauth1 import session - - version = self.choose_version(version) - auth_url = self.credential.auth_url - if version is not None: - auth_url = self._remove_url_version() - - password_args = { - "auth_url": auth_url, - "username": self.credential.username, - "password": self.credential.password, - "tenant_name": self.credential.tenant_name - } - - if version is None: - # NOTE(rvasilets): If version not specified than we discover - # available version with the smallest number. To be able to - # discover versions we need session - temp_session = session.Session( - verify=(self.credential.https_cacert or - not self.credential.https_insecure), - timeout=CONF.openstack_client_http_timeout) - version = str(discover.Discover( - temp_session, - password_args["auth_url"]).version_data()[0]["version"][0]) - - if "v2.0" not in password_args["auth_url"] and ( - version != "2"): - password_args.update({ - "user_domain_name": self.credential.user_domain_name, - "domain_name": self.credential.domain_name, - "project_domain_name": self.credential.project_domain_name - }) - identity_plugin = identity.Password(**password_args) - sess = session.Session( - auth=identity_plugin, - verify=(self.credential.https_cacert or - not self.credential.https_insecure), - timeout=CONF.openstack_client_http_timeout) - self.cache[key] = (sess, identity_plugin) - return self.cache[key] - - def _remove_url_version(self): - """Remove any version from the auth_url. - - The keystone Client code requires that auth_url be the root url - if a version override is used. - """ - url = parse.urlparse(self.credential.auth_url) - path = url.path.rstrip("/") - if path.endswith("v2.0") or path.endswith("v3"): - path = os.path.join(*os.path.split(path)[:-1]) - parts = (url.scheme, url.netloc, path, url.params, url.query, - url.fragment) - return parse.urlunparse(parts) - return self.credential.auth_url - - def create_client(self, version=None): - """Return a keystone client. - - :param version: Keystone API version, can be one of: - ("2", "3") - - If this object was constructed with a version in the api_info - then that will be used unless the version parameter is passed. - """ - import keystoneclient - from keystoneclient import client - - # Use the version in the api_info if provided, otherwise fall - # back to the passed version (which may be None, in which case - # keystoneclient chooses). 
- version = self.choose_version(version) - - sess = self.get_session(version=version)[0] - - kw = {"version": version, "session": sess, - "timeout": CONF.openstack_client_http_timeout} - if keystoneclient.__version__[0] == "1": - # NOTE(andreykurilin): let's leave this hack for envs which uses - # old(<2.0.0) keystoneclient version. Upstream fix: - # https://github.com/openstack/python-keystoneclient/commit/d9031c252848d89270a543b67109a46f9c505c86 - from keystoneauth1 import plugin - kw["auth_url"] = sess.get_endpoint(interface=plugin.AUTH_INTERFACE) - if self.credential.endpoint_type: - kw["interface"] = self.credential.endpoint_type - - # NOTE(amyge): - # In auth_ref(), plugin.get_access(sess) only returns a auth_ref object - # and won't check the authentication access until it is actually being - # called. To catch the authentication failure in auth_ref(), we will - # have to call self.auth_ref.auth_token here to actually use auth_ref. - self.auth_ref # noqa - - return client.Client(**kw) - - -@configure("nova", default_version="2", default_service_type="compute") -class Nova(OSClient): - """Wrapper for NovaClient which returns a authenticated native client.""" - - @classmethod - def validate_version(cls, version): - from novaclient import api_versions - from novaclient import exceptions as nova_exc - - try: - api_versions.get_api_version(version) - except nova_exc.UnsupportedVersion: - raise exceptions.RallyException( - "Version string '%s' is unsupported." % version) - - def create_client(self, version=None, service_type=None): - """Return nova client.""" - from novaclient import client as nova - - client = nova.Client( - session=self.keystone.get_session()[0], - version=self.choose_version(version), - endpoint_override=self._get_endpoint(service_type)) - return client - - -@configure("neutron", default_version="2.0", default_service_type="network", - supported_versions=["2.0"]) -class Neutron(OSClient): - """Wrapper for NeutronClient which returns an authenticated native client. - - """ - - def create_client(self, version=None, service_type=None): - """Return neutron client.""" - from neutronclient.neutron import client as neutron - - kw_args = {} - if self.credential.endpoint_type: - kw_args["endpoint_type"] = self.credential.endpoint_type - - client = neutron.Client( - self.choose_version(version), - session=self.keystone.get_session()[0], - endpoint_override=self._get_endpoint(service_type), - **kw_args) - return client - - -@configure("glance", default_version="2", default_service_type="image", - supported_versions=["1", "2"]) -class Glance(OSClient): - """Wrapper for GlanceClient which returns an authenticated native client. - - """ - - def create_client(self, version=None, service_type=None): - """Return glance client.""" - import glanceclient as glance - - session = self.keystone.get_session()[0] - client = glance.Client( - version=self.choose_version(version), - endpoint_override=self._get_endpoint(service_type), - session=session) - return client - - -@configure("heat", default_version="1", default_service_type="orchestration", - supported_versions=["1"]) -class Heat(OSClient): - """Wrapper for HeatClient which returns an authenticated native client.""" - def create_client(self, version=None, service_type=None): - """Return heat client.""" - from heatclient import client as heat - - # ToDo: Remove explicit endpoint_type or interface initialization - # when heatclient no longer uses it. 
- kw_args = {} - if self.credential.endpoint_type: - kw_args["interface"] = self.credential.endpoint_type - - client = heat.Client( - self.choose_version(version), - session=self.keystone.get_session()[0], - endpoint_override=self._get_endpoint(service_type), - **kw_args) - return client - - -@configure("cinder", default_version="2", default_service_type="volumev2", - supported_versions=["1", "2"]) -class Cinder(OSClient): - """Wrapper for CinderClient which returns an authenticated native client. - - """ - def create_client(self, version=None, service_type=None): - """Return cinder client.""" - from cinderclient import client as cinder - - client = cinder.Client( - self.choose_version(version), - session=self.keystone.get_session()[0], - endpoint_override=self._get_endpoint(service_type)) - return client - - -@configure("manila", default_version="1", default_service_type="share") -class Manila(OSClient): - """Wrapper for ManilaClient which returns an authenticated native client. - - """ - @classmethod - def validate_version(cls, version): - from manilaclient import api_versions - from manilaclient import exceptions as manila_exc - - try: - api_versions.get_api_version(version) - except manila_exc.UnsupportedVersion: - raise exceptions.RallyException( - "Version string '%s' is unsupported." % version) - - def create_client(self, version=None, service_type=None): - """Return manila client.""" - from manilaclient import client as manila - manila_client = manila.Client( - self.choose_version(version), - session=self.keystone.get_session()[0], - service_catalog_url=self._get_endpoint(service_type)) - return manila_client - - -@configure("ceilometer", default_version="2", default_service_type="metering", - supported_versions=["1", "2"]) -class Ceilometer(OSClient): - """Wrapper for CeilometerClient which returns authenticated native client. - - """ - def create_client(self, version=None, service_type=None): - """Return ceilometer client.""" - from ceilometerclient import client as ceilometer - - client = ceilometer.get_client( - self.choose_version(version), - session=self.keystone.get_session()[0], - endpoint_override=self._get_endpoint(service_type)) - return client - - -@configure("gnocchi", default_service_type="metric", default_version="1", - supported_versions=["1"]) -class Gnocchi(OSClient): - """Wrapper for GnocchiClient which returns an authenticated native client. - - """ - - def create_client(self, version=None, service_type=None): - """Return gnocchi client.""" - # NOTE(sumantmurke): gnocchiclient requires keystoneauth1 for - # authenticating and creating a session. - from gnocchiclient import client as gnocchi - - service_type = self.choose_service_type(service_type) - sess = self.keystone.get_session()[0] - gclient = gnocchi.Client( - version=self.choose_version(version), session=sess, - adapter_options={"service_type": service_type}) - return gclient - - -@configure("ironic", default_version="1", default_service_type="baremetal", - supported_versions=["1"]) -class Ironic(OSClient): - """Wrapper for IronicClient which returns an authenticated native client. 
- - """ - - def create_client(self, version=None, service_type=None): - """Return Ironic client.""" - from ironicclient import client as ironic - - client = ironic.get_client( - self.choose_version(version), - session=self.keystone.get_session()[0], - endpoint=self._get_endpoint(service_type)) - return client - - -@configure("sahara", default_version="1.1", supported_versions=["1.0", "1.1"], - default_service_type="data-processing") -class Sahara(OSClient): - """Wrapper for SaharaClient which returns an authenticated native client. - - """ - - # NOTE(andreykurilin): saharaclient supports "1.0" version and doesn't - # support "1". `choose_version` and `validate_version` methods are written - # as a hack to covert 1 -> 1.0, which can simplify setting saharaclient - # for end-users. - def choose_version(self, version=None): - return float(super(Sahara, self).choose_version(version)) - - @classmethod - def validate_version(cls, version): - super(Sahara, cls).validate_version(float(version)) - - def create_client(self, version=None, service_type=None): - """Return Sahara client.""" - from saharaclient import client as sahara - - client = sahara.Client( - self.choose_version(version), - session=self.keystone.get_session()[0], - sahara_url=self._get_endpoint(service_type)) - - return client - - -@configure("zaqar", default_version="1.1", default_service_type="messaging", - supported_versions=["1", "1.1"]) -class Zaqar(OSClient): - """Wrapper for ZaqarClient which returns an authenticated native client. - - """ - def choose_version(self, version=None): - # zaqarclient accepts only int or float obj as version - return float(super(Zaqar, self).choose_version(version)) - - def create_client(self, version=None, service_type=None): - """Return Zaqar client.""" - from zaqarclient.queues import client as zaqar - client = zaqar.Client(url=self._get_endpoint(), - version=self.choose_version(version), - session=self.keystone.get_session()[0]) - return client - - -@configure("murano", default_version="1", - default_service_type="application-catalog", - supported_versions=["1"]) -class Murano(OSClient): - """Wrapper for MuranoClient which returns an authenticated native client. - - """ - def create_client(self, version=None, service_type=None): - """Return Murano client.""" - from muranoclient import client as murano - - client = murano.Client(self.choose_version(version), - endpoint=self._get_endpoint(service_type), - token=self.keystone.auth_ref.auth_token) - - return client - - -@configure("designate", default_version="1", default_service_type="dns", - supported_versions=["1", "2"]) -class Designate(OSClient): - """Wrapper for DesignateClient which returns authenticated native client. - - """ - def create_client(self, version=None, service_type=None): - """Return designate client.""" - from designateclient import client - - version = self.choose_version(version) - - api_url = self._get_endpoint(service_type) - api_url += "/v%s" % version - - session = self.keystone.get_session()[0] - if version == "2": - return client.Client(version, session=session, - endpoint_override=api_url) - return client.Client(version, session=session, - endpoint=api_url) - - -@configure("trove", default_version="1.0", supported_versions=["1.0"], - default_service_type="database") -class Trove(OSClient): - """Wrapper for TroveClient which returns an authenticated native client. 
- - """ - def create_client(self, version=None, service_type=None): - """Returns trove client.""" - from troveclient import client as trove - - client = trove.Client(self.choose_version(version), - session=self.keystone.get_session()[0], - endpoint=self._get_endpoint(service_type)) - return client - - -@configure("mistral", default_service_type="workflowv2") -class Mistral(OSClient): - """Wrapper for MistralClient which returns an authenticated native client. - - """ - def create_client(self, service_type=None): - """Return Mistral client.""" - from mistralclient.api import client as mistral - - client = mistral.client( - mistral_url=self._get_endpoint(service_type), - service_type=self.choose_service_type(service_type), - auth_token=self.keystone.auth_ref.auth_token) - return client - - -@configure("swift", default_service_type="object-store") -class Swift(OSClient): - """Wrapper for SwiftClient which returns an authenticated native client. - - """ - def create_client(self, service_type=None): - """Return swift client.""" - from swiftclient import client as swift - - auth_token = self.keystone.auth_ref.auth_token - client = swift.Connection(retries=1, - preauthurl=self._get_endpoint(service_type), - preauthtoken=auth_token, - insecure=self.credential.https_insecure, - cacert=self.credential.https_cacert, - user=self.credential.username, - tenant_name=self.credential.tenant_name, - ) - return client - - -@configure("ec2") -class EC2(OSClient): - """Wrapper for EC2Client which returns an authenticated native client. - - """ - def create_client(self): - """Return ec2 client.""" - LOG.warning("rally.osclient.EC2 is deprecated since Rally 0.10.0.") - - import boto - - kc = self.keystone() - - if kc.version != "v2.0": - raise exceptions.RallyException( - "Rally EC2 scenario supports only Keystone version 2") - ec2_credential = kc.ec2.create(user_id=kc.auth_user_id, - tenant_id=kc.auth_tenant_id) - client = boto.connect_ec2_endpoint( - url=self._get_endpoint(), - aws_access_key_id=ec2_credential.access, - aws_secret_access_key=ec2_credential.secret, - is_secure=self.credential.https_insecure) - return client - - -@configure("monasca", default_version="2_0", - default_service_type="monitoring", supported_versions=["2_0"]) -class Monasca(OSClient): - """Wrapper for MonascaClient which returns an authenticated native client. - - """ - def create_client(self, version=None, service_type=None): - """Return monasca client.""" - from monascaclient import client as monasca - - # Change this to use session once it's supported by monascaclient - client = monasca.Client( - self.choose_version(version), - self._get_endpoint(service_type), - token=self.keystone.auth_ref.auth_token, - timeout=CONF.openstack_client_http_timeout, - insecure=self.credential.https_insecure, - **self._get_auth_info(project_name_key="tenant_name")) - return client - - -@configure("senlin", default_version="1", default_service_type="clustering", - supported_versions=["1"]) -class Senlin(OSClient): - """Wrapper for SenlinClient which returns an authenticated native client. 
- - """ - def create_client(self, version=None, service_type=None): - """Return senlin client.""" - from senlinclient import client as senlin - - return senlin.Client( - self.choose_version(version), - **self._get_auth_info(project_name_key="project_name", - cacert_key="cert", - endpoint_type="interface")) - - -@configure("magnum", default_version="1", supported_versions=["1"], - default_service_type="container-infra",) -class Magnum(OSClient): - """Wrapper for MagnumClient which returns an authenticated native client. - - """ - def create_client(self, version=None, service_type=None): - """Return magnum client.""" - from magnumclient import client as magnum - - api_url = self._get_endpoint(service_type) - session = self.keystone.get_session()[0] - - return magnum.Client( - session=session, - interface=self.credential.endpoint_type, - magnum_url=api_url) - - -@configure("watcher", default_version="1", default_service_type="infra-optim", - supported_versions=["1"]) -class Watcher(OSClient): - """Wrapper for WatcherClient which returns an authenticated native client. - - """ - def create_client(self, version=None, service_type=None): - """Return watcher client.""" - from watcherclient import client as watcher_client - watcher_api_url = self._get_endpoint( - self.choose_service_type(service_type)) - client = watcher_client.Client( - self.choose_version(version), - endpoint=watcher_api_url, - session=self.keystone.get_session()[0]) - return client - - -class Clients(object): - """This class simplify and unify work with OpenStack python clients.""" - - def __init__(self, credential, api_info=None, cache=None): - self.credential = credential - self.api_info = api_info or {} - self.cache = cache or {} - - def __getattr__(self, client_name): - """Lazy load of clients.""" - return OSClient.get(client_name)(self.credential, self.api_info, - self.cache) - - @classmethod - def create_from_env(cls): - creds = envutils.get_creds_from_env_vars() - from rally.plugins.openstack import credential - oscred = credential.OpenStackCredential( - auth_url=creds["auth_url"], - username=creds["admin"]["username"], - password=creds["admin"]["password"], - tenant_name=creds["admin"]["tenant_name"], - endpoint_type=creds["endpoint_type"], - user_domain_name=creds["admin"].get("user_domain_name"), - project_domain_name=creds["admin"].get("project_domain_name"), - endpoint=creds["endpoint"], - region_name=creds["region_name"], - https_cacert=creds["https_cacert"], - https_insecure=creds["https_insecure"]) - return cls(oscred) - - def clear(self): - """Remove all cached client handles.""" - self.cache = {} - - def verified_keystone(self): - """Ensure keystone endpoints are valid and then authenticate - - :returns: Keystone Client - """ - # Ensure that user is admin - if "admin" not in [role.lower() for role in - self.keystone.auth_ref.role_names]: - raise exceptions.InvalidAdminException( - username=self.credential.username) - return self.keystone() - - def services(self): - """Return available services names and types. 
- - :returns: dict, {"service_type": "service_name", ...} - """ - if "services_data" not in self.cache: - services_data = {} - available_services = self.keystone.service_catalog.get_endpoints() - for stype in available_services.keys(): - if stype in consts.ServiceType: - services_data[stype] = consts.ServiceType[stype] - else: - services_data[stype] = "__unknown__" - self.cache["services_data"] = services_data - - return self.cache["services_data"] diff --git a/rally/plugins/openstack/platforms/__init__.py b/rally/plugins/openstack/platforms/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/platforms/existing.py b/rally/plugins/openstack/platforms/existing.py deleted file mode 100644 index c0aef4d6da..0000000000 --- a/rally/plugins/openstack/platforms/existing.py +++ /dev/null @@ -1,252 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy -import json -import traceback - -from rally.common import cfg -from rally.common import logging -from rally.env import platform -from rally.plugins.openstack import osclients - - -LOG = logging.getLogger(__name__) - -CONF = cfg.CONF - - -@platform.configure(name="existing", platform="openstack") -class OpenStack(platform.Platform): - """Default plugin for OpenStack platform - - It may be used to test any existing OpenStack API compatible cloud. 
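(Editor's note: for illustration, a minimal spec for this platform might look as follows. The values are placeholders; the accepted fields are defined by the CONFIG_SCHEMA just below, and per create() a Keystone V3 project_name is converted to tenant_name internally:

    spec = {
        "auth_url": "https://keystone.example.com/v3",
        "region_name": "RegionOne",
        "endpoint_type": "public",
        "https_insecure": False,
        "admin": {
            "username": "admin",
            "password": "secret",
            "project_name": "admin",
            "user_domain_name": "Default",
            "project_domain_name": "Default",
        },
    }
)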
- """ - CONFIG_SCHEMA = { - "type": "object", - "definitions": { - "user": { - "type": "object", - "oneOf": [ - { - "description": "Keystone V2.0", - "properties": { - "username": {"type": "string"}, - "password": {"type": "string"}, - "tenant_name": {"type": "string"}, - }, - "required": ["username", "password", "tenant_name"], - "additionalProperties": False - }, - { - "description": "Keystone V3.0", - "properties": { - "username": {"type": "string"}, - "password": {"type": "string"}, - "project_name": {"type": "string"}, - "domain_name": {"type": "string"}, - "user_domain_name": {"type": "string"}, - "project_domain_name": {"type": "string"}, - }, - "required": ["username", "password", "project_name"], - "additionalProperties": False - } - ], - } - }, - "properties": { - "auth_url": {"type": "string"}, - "region_name": {"type": "string"}, - "endpoint": {"type": ["string", "null"]}, - "endpoint_type": {"enum": ["public", "internal", "admin", None]}, - "https_insecure": {"type": "boolean"}, - "https_cacert": {"type": "string"}, - "profiler_hmac_key": {"type": ["string", "null"]}, - "profiler_conn_str": {"type": ["string", "null"]}, - "admin": {"$ref": "#/definitions/user"}, - "users": { - "type": "array", - "items": {"$ref": "#/definitions/user"}, - "minItems": 1 - } - }, - "anyOf": [ - { - "description": "The case when the admin is specified and the " - "users can be created via 'users@openstack' " - "context or 'existing_users' will be used.", - "required": ["admin", "auth_url"]}, - { - "description": "The case when the only existing users are " - "specified.", - "required": ["users", "auth_url"]} - ], - "additionalProperties": False - } - - def create(self): - defaults = { - "region_name": None, - "endpoint_type": None, - "domain_name": None, - "user_domain_name": cfg.CONF.openstack.user_domain, - "project_domain_name": cfg.CONF.openstack.project_domain, - "https_insecure": False, - "https_cacert": None - } - - """Converts creds of real OpenStack to internal presentation.""" - new_data = copy.deepcopy(self.spec) - if "endpoint" in new_data: - LOG.warning("endpoint is deprecated and not used.") - del new_data["endpoint"] - admin = new_data.pop("admin", None) - users = new_data.pop("users", []) - - if admin: - if "project_name" in admin: - admin["tenant_name"] = admin.pop("project_name") - admin.update(new_data) - for k, v in defaults.items(): - admin.setdefault(k, v) - for user in users: - if "project_name" in user: - user["tenant_name"] = user.pop("project_name") - user.update(new_data) - for k, v in defaults.items(): - user.setdefault(k, v) - return {"admin": admin, "users": users}, {} - - def destroy(self): - # NOTE(boris-42): No action need to be performed. 
- pass - - def cleanup(self, task_uuid=None): - return { - "message": "Coming soon!", - "discovered": 0, - "deleted": 0, - "failed": 0, - "resources": {}, - "errors": [] - } - - def check_health(self): - """Check whatever platform is alive.""" - if self.platform_data["admin"]: - try: - osclients.Clients( - self.platform_data["admin"]).verified_keystone() - except Exception: - d = copy.deepcopy(self.platform_data["admin"]) - d["password"] = "***" - return { - "available": False, - "message": ( - "Bad admin creds: \n%s" - % json.dumps(d, indent=2, sort_keys=True)), - "traceback": traceback.format_exc() - } - - for user in self.platform_data["users"]: - try: - osclients.Clients(user).keystone() - except Exception: - d = copy.deepcopy(user) - d["password"] = "***" - return { - "available": False, - "message": ( - "Bad user creds: \n%s" - % json.dumps(d, indent=2, sort_keys=True)), - "traceback": traceback.format_exc() - } - - return {"available": True} - - def info(self): - """Return information about cloud as dict.""" - active_user = (self.platform_data["admin"] or - self.platform_data["users"][0]) - services = [] - for stype, name in osclients.Clients(active_user).services().items(): - if name == "__unknown__": - # `__unknown__` name misleads, let's just not include it... - services.append({"type": stype}) - else: - services.append({"type": stype, "name": name}) - - return { - "info": { - "services": sorted(services, key=lambda x: x["type"]) - } - } - - def _get_validation_context(self): - return {"users@openstack": {}} - - @classmethod - def create_spec_from_sys_environ(cls, sys_environ): - - from oslo_utils import strutils - - required_env_vars = ["OS_AUTH_URL", "OS_USERNAME", "OS_PASSWORD"] - missing_env_vars = [v for v in required_env_vars if - v not in sys_environ] - if missing_env_vars: - return {"available": False, - "message": "The following variable(s) are missed: %s" % - missing_env_vars} - tenant_name = sys_environ.get("OS_PROJECT_NAME", - sys_environ.get("OS_TENANT_NAME")) - if tenant_name is None: - return {"available": False, - "message": "One of OS_PROJECT_NAME or OS_TENANT_NAME " - "should be specified."} - - endpoint_type = sys_environ.get("OS_ENDPOINT_TYPE", - sys_environ.get("OS_INTERFACE")) - if endpoint_type and "URL" in endpoint_type: - endpoint_type = endpoint_type.replace("URL", "") - - spec = { - "auth_url": sys_environ["OS_AUTH_URL"], - "admin": { - "username": sys_environ["OS_USERNAME"], - "password": sys_environ["OS_PASSWORD"], - "tenant_name": tenant_name - }, - "endpoint_type": endpoint_type, - "region_name": sys_environ.get("OS_REGION_NAME", ""), - "https_cacert": sys_environ.get("OS_CACERT", ""), - "https_insecure": strutils.bool_from_string( - sys_environ.get("OS_INSECURE")), - "profiler_hmac_key": sys_environ.get("OSPROFILER_HMAC_KEY"), - "profiler_conn_str": sys_environ.get("OSPROFILER_CONN_STR") - } - - user_domain_name = sys_environ.get("OS_USER_DOMAIN_NAME") - project_domain_name = sys_environ.get("OS_PROJECT_DOMAIN_NAME") - identity_api_version = sys_environ.get( - "OS_IDENTITY_API_VERSION", sys_environ.get("IDENTITY_API_VERSION")) - if (identity_api_version == "3" or - (identity_api_version is None and - (user_domain_name or project_domain_name))): - # it is Keystone v3 and it has another config scheme - spec["admin"]["project_name"] = spec["admin"].pop("tenant_name") - spec["admin"]["user_domain_name"] = user_domain_name or "Default" - project_domain_name = project_domain_name or "Default" - spec["admin"]["project_domain_name"] = project_domain_name - 
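(Editor's note: as an example of the mapping implemented above, a Keystone V3 environment like the following would produce a v3-style admin section; all values are hypothetical:

    # Hypothetical environment:
    #   OS_AUTH_URL=https://keystone.example.com/v3
    #   OS_USERNAME=admin  OS_PASSWORD=secret  OS_PROJECT_NAME=admin
    #   OS_IDENTITY_API_VERSION=3
    #
    # Resulting "admin" section of the spec (domains fall back to "Default"):
    #   {"username": "admin", "password": "secret", "project_name": "admin",
    #    "user_domain_name": "Default", "project_domain_name": "Default"}
)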
- return {"spec": spec, "available": True, "message": "Available"} diff --git a/rally/plugins/openstack/scenario.py b/rally/plugins/openstack/scenario.py deleted file mode 100644 index ad1c36c304..0000000000 --- a/rally/plugins/openstack/scenario.py +++ /dev/null @@ -1,144 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import functools -import random - -from osprofiler import profiler - -from rally.common import cfg -from rally.common.plugin import plugin -from rally.plugins.openstack import osclients -from rally.task import context -from rally.task import scenario - -configure = functools.partial(scenario.configure, platform="openstack") - -CONF = cfg.CONF - - -@context.add_default_context("users@openstack", {}) -@plugin.default_meta(inherit=False) -class OpenStackScenario(scenario.Scenario): - """Base class for all OpenStack scenarios.""" - - def __init__(self, context=None, admin_clients=None, clients=None): - super(OpenStackScenario, self).__init__(context) - if context: - api_info = {} - if "api_versions@openstack" in context.get("config", {}): - api_versions = context["config"]["api_versions@openstack"] - for service in api_versions: - api_info[service] = { - "version": api_versions[service].get("version"), - "service_type": api_versions[service].get( - "service_type")} - - if admin_clients is None and "admin" in context: - self._admin_clients = osclients.Clients( - context["admin"]["credential"], api_info) - if clients is None: - if "users" in context and "user" not in context: - self._choose_user(context) - - if "user" in context: - self._clients = osclients.Clients( - context["user"]["credential"], api_info) - - if admin_clients: - self._admin_clients = admin_clients - - if clients: - self._clients = clients - - self._init_profiler(context) - - def _choose_user(self, context): - """Choose one user from users context - - We are choosing on each iteration one user - - """ - if context["user_choice_method"] == "random": - user = random.choice(context["users"]) - tenant = context["tenants"][user["tenant_id"]] - else: - # Second and last case - 'round_robin'. - tenants_amount = len(context["tenants"]) - # NOTE(amaretskiy): iteration is subtracted by `1' because it - # starts from `1' but we count from `0' - iteration = context["iteration"] - 1 - tenant_index = int(iteration % tenants_amount) - tenant_id = sorted(context["tenants"].keys())[tenant_index] - tenant = context["tenants"][tenant_id] - users = context["tenants"][tenant_id]["users"] - user_index = int((iteration / tenants_amount) % len(users)) - user = users[user_index] - - context["user"], context["tenant"] = user, tenant - - def clients(self, client_type, version=None): - """Returns a python openstack client of the requested type. - - Only one non-admin user is used per every run of scenario. - - :param client_type: Client type ("nova"/"glance" etc.) - :param version: client version ("1"/"2" etc.) 
- - :returns: Standard python OpenStack client instance - """ - client = getattr(self._clients, client_type) - - return client(version) if version is not None else client() - - def admin_clients(self, client_type, version=None): - """Returns a python admin openstack client of the requested type. - - :param client_type: Client type ("nova"/"glance" etc.) - :param version: client version ("1"/"2" etc.) - - :returns: Python openstack client object - """ - client = getattr(self._admin_clients, client_type) - - return client(version) if version is not None else client() - - def _init_profiler(self, context): - """Inits the profiler.""" - if not CONF.openstack.enable_profiler: - return - if context is not None: - cred = None - profiler_hmac_key = None - profiler_conn_str = None - if context.get("admin"): - cred = context["admin"]["credential"] - if cred.profiler_hmac_key is not None: - profiler_hmac_key = cred.profiler_hmac_key - profiler_conn_str = cred.profiler_conn_str - if context.get("user"): - cred = context["user"]["credential"] - if cred.profiler_hmac_key is not None: - profiler_hmac_key = cred.profiler_hmac_key - profiler_conn_str = cred.profiler_conn_str - if profiler_hmac_key is None: - return - profiler.init(profiler_hmac_key) - trace_id = profiler.get().get_base_id() - complete_data = {"title": "OSProfiler Trace-ID", - "chart_plugin": "OSProfiler", - "data": {"trace_id": [trace_id], - "conn_str": profiler_conn_str}} - self.add_output(complete=complete_data) diff --git a/rally/plugins/openstack/scenarios/__init__.py b/rally/plugins/openstack/scenarios/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/scenarios/authenticate/__init__.py b/rally/plugins/openstack/scenarios/authenticate/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/scenarios/authenticate/authenticate.py b/rally/plugins/openstack/scenarios/authenticate/authenticate.py deleted file mode 100644 index ad54d3850d..0000000000 --- a/rally/plugins/openstack/scenarios/authenticate/authenticate.py +++ /dev/null @@ -1,169 +0,0 @@ -# Copyright 2014 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
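(Editor's note: to show how the OpenStackScenario helpers above — clients(), admin_clients() and the profiler hook — are typically consumed, here is a minimal, purely illustrative scenario plugin. The class name and atomic-action name are hypothetical; the decorator pattern matches the Authenticate scenarios that follow.

    from rally.plugins.openstack import scenario
    from rally.task import atomic
    from rally.task import validation

    @validation.add("required_platform", platform="openstack", users=True)
    @scenario.configure(name="Example.list_flavors", platform="openstack")
    class ListFlavors(scenario.OpenStackScenario):

        def run(self):
            # self.clients("nova") returns the per-user novaclient built via osclients.Clients
            with atomic.ActionTimer(self, "example.list_flavors"):
                self.clients("nova").flavors.list()
)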
- -from rally import consts -from rally.plugins.openstack import scenario -from rally.task import atomic -from rally.task import validation - - -"""Scenarios for Authentication mechanism.""" - - -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="Authenticate.keystone", platform="openstack") -class Keystone(scenario.OpenStackScenario): - - @atomic.action_timer("authenticate.keystone") - def run(self): - """Check Keystone Client.""" - self.clients("keystone") - - -@validation.add("number", param_name="repetitions", minval=1) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="Authenticate.validate_glance", platform="openstack") -class ValidateGlance(scenario.OpenStackScenario): - - def run(self, repetitions): - """Check Glance Client to ensure validation of token. - - Creation of the client does not ensure validation of the token. - We have to do some minimal operation to make sure token gets validated. - In following we are checking for non-existent image. - - :param repetitions: number of times to validate - """ - glance_client = self.clients("glance") - image_name = "__intentionally_non_existent_image___" - with atomic.ActionTimer(self, "authenticate.validate_glance"): - for i in range(repetitions): - list(glance_client.images.list(name=image_name)) - - -@validation.add("number", param_name="repetitions", minval=1) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="Authenticate.validate_nova", platform="openstack") -class ValidateNova(scenario.OpenStackScenario): - - def run(self, repetitions): - """Check Nova Client to ensure validation of token. - - Creation of the client does not ensure validation of the token. - We have to do some minimal operation to make sure token gets validated. - - :param repetitions: number of times to validate - """ - nova_client = self.clients("nova") - with atomic.ActionTimer(self, "authenticate.validate_nova"): - for i in range(repetitions): - nova_client.flavors.list() - - -@validation.add("number", param_name="repetitions", minval=1) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="Authenticate.validate_ceilometer", - platform="openstack") -class ValidateCeilometer(scenario.OpenStackScenario): - - def run(self, repetitions): - """Check Ceilometer Client to ensure validation of token. - - Creation of the client does not ensure validation of the token. - We have to do some minimal operation to make sure token gets validated. - - :param repetitions: number of times to validate - """ - ceilometer_client = self.clients("ceilometer") - with atomic.ActionTimer(self, "authenticate.validate_ceilometer"): - for i in range(repetitions): - ceilometer_client.meters.list() - - -@validation.add("number", param_name="repetitions", minval=1) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="Authenticate.validate_cinder", platform="openstack") -class ValidateCinder(scenario.OpenStackScenario): - - def run(self, repetitions): - """Check Cinder Client to ensure validation of token. - - Creation of the client does not ensure validation of the token. - We have to do some minimal operation to make sure token gets validated. 
- - :param repetitions: number of times to validate - """ - cinder_client = self.clients("cinder") - with atomic.ActionTimer(self, "authenticate.validate_cinder"): - for i in range(repetitions): - cinder_client.volume_types.list() - - -@validation.add("number", param_name="repetitions", minval=1) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="Authenticate.validate_neutron", platform="openstack") -class ValidateNeutron(scenario.OpenStackScenario): - - def run(self, repetitions): - """Check Neutron Client to ensure validation of token. - - Creation of the client does not ensure validation of the token. - We have to do some minimal operation to make sure token gets validated. - - :param repetitions: number of times to validate - """ - neutron_client = self.clients("neutron") - with atomic.ActionTimer(self, "authenticate.validate_neutron"): - for i in range(repetitions): - neutron_client.list_networks() - - -@validation.add("number", param_name="repetitions", minval=1) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="Authenticate.validate_heat", platform="openstack") -class ValidateHeat(scenario.OpenStackScenario): - - def run(self, repetitions): - """Check Heat Client to ensure validation of token. - - Creation of the client does not ensure validation of the token. - We have to do some minimal operation to make sure token gets validated. - - :param repetitions: number of times to validate - """ - heat_client = self.clients("heat") - with atomic.ActionTimer(self, "authenticate.validate_heat"): - for i in range(repetitions): - list(heat_client.stacks.list(limit=0)) - - -@validation.add("number", param_name="repetitions", minval=1) -@validation.add("required_platform", platform="openstack", users=True) -@validation.add("required_services", - services=[consts.Service.MONASCA]) -@scenario.configure(name="Authenticate.validate_monasca", platform="openstack") -class ValidateMonasca(scenario.OpenStackScenario): - - def run(self, repetitions): - """Check Monasca Client to ensure validation of token. - - Creation of the client does not ensure validation of the token. - We have to do some minimal operation to make sure token gets validated. - - :param repetitions: number of times to validate - """ - monasca_client = self.clients("monasca") - with atomic.ActionTimer(self, "authenticate.validate_monasca"): - for i in range(repetitions): - list(monasca_client.metrics.list(limit=0)) diff --git a/rally/plugins/openstack/scenarios/ceilometer/__init__.py b/rally/plugins/openstack/scenarios/ceilometer/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/scenarios/ceilometer/alarms.py b/rally/plugins/openstack/scenarios/ceilometer/alarms.py deleted file mode 100644 index 2c1af1e429..0000000000 --- a/rally/plugins/openstack/scenarios/ceilometer/alarms.py +++ /dev/null @@ -1,196 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.ceilometer import utils as ceiloutils -from rally.task import validation - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["ceilometer"]}, - name="CeilometerAlarms.create_alarm", - platform="openstack") -class CreateAlarm(ceiloutils.CeilometerScenario): - - def run(self, meter_name, threshold, **kwargs): - """Create an alarm. - - This scenarios test POST /v2/alarms. - meter_name and threshold are required parameters for alarm creation. - kwargs stores other optional parameters like 'ok_actions', - 'project_id' etc that may be passed while creating an alarm. - - :param meter_name: specifies meter name of the alarm - :param threshold: specifies alarm threshold - :param kwargs: specifies optional arguments for alarm creation. - """ - - self._create_alarm(meter_name, threshold, kwargs) - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="CeilometerAlarms.list_alarms", platform="openstack") -class ListAlarms(ceiloutils.CeilometerScenario): - - def run(self): - """Fetch all alarms. - - This scenario fetches list of all alarms using GET /v2/alarms. - """ - self._list_alarms() - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["ceilometer"]}, - name="CeilometerAlarms.create_and_list_alarm", - platform="openstack") -class CreateAndListAlarm(ceiloutils.CeilometerScenario): - - def run(self, meter_name, threshold, **kwargs): - """Create and get the newly created alarm. - - This scenarios test GET /v2/alarms/(alarm_id) - Initially alarm is created and then the created alarm is fetched using - its alarm_id. meter_name and threshold are required parameters - for alarm creation. kwargs stores other optional parameters like - 'ok_actions', 'project_id' etc. that may be passed while creating - an alarm. - - :param meter_name: specifies meter name of the alarm - :param threshold: specifies alarm threshold - :param kwargs: specifies optional arguments for alarm creation. - """ - alarm = self._create_alarm(meter_name, threshold, kwargs) - self._list_alarms(alarm.alarm_id) - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["ceilometer"]}, - name="CeilometerAlarms.create_and_get_alarm", - platform="openstack") -class CreateAndGetAlarm(ceiloutils.CeilometerScenario): - - def run(self, meter_name, threshold, **kwargs): - """Create and get the newly created alarm. - - These scenarios test GET /v2/alarms/(alarm_id) - Initially an alarm is created and then its detailed information is - fetched using its alarm_id. meter_name and threshold are required - parameters for alarm creation. kwargs stores other optional parameters - like 'ok_actions', 'project_id' etc. that may be passed while creating - an alarm. - - :param meter_name: specifies meter name of the alarm - :param threshold: specifies alarm threshold - :param kwargs: specifies optional arguments for alarm creation. 
- """ - alarm = self._create_alarm(meter_name, threshold, kwargs) - self._get_alarm(alarm.alarm_id) - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["ceilometer"]}, - name="CeilometerAlarms.create_and_update_alarm", - platform="openstack") -class CreateAndUpdateAlarm(ceiloutils.CeilometerScenario): - - def run(self, meter_name, threshold, **kwargs): - """Create and update the newly created alarm. - - This scenarios test PUT /v2/alarms/(alarm_id) - Initially alarm is created and then the created alarm is updated using - its alarm_id. meter_name and threshold are required parameters - for alarm creation. kwargs stores other optional parameters like - 'ok_actions', 'project_id' etc that may be passed while alarm creation. - - :param meter_name: specifies meter name of the alarm - :param threshold: specifies alarm threshold - :param kwargs: specifies optional arguments for alarm creation. - """ - alarm = self._create_alarm(meter_name, threshold, kwargs) - alarm_dict_diff = {"description": "Changed Test Description"} - self._update_alarm(alarm.alarm_id, alarm_dict_diff) - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["ceilometer"]}, - name="CeilometerAlarms.create_and_delete_alarm", - platform="openstack") -class CreateAndDeleteAlarm(ceiloutils.CeilometerScenario): - - def run(self, meter_name, threshold, **kwargs): - """Create and delete the newly created alarm. - - This scenarios test DELETE /v2/alarms/(alarm_id) - Initially alarm is created and then the created alarm is deleted using - its alarm_id. meter_name and threshold are required parameters - for alarm creation. kwargs stores other optional parameters like - 'ok_actions', 'project_id' etc that may be passed while alarm creation. - - :param meter_name: specifies meter name of the alarm - :param threshold: specifies alarm threshold - :param kwargs: specifies optional arguments for alarm creation. - """ - alarm = self._create_alarm(meter_name, threshold, kwargs) - self._delete_alarm(alarm.alarm_id) - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["ceilometer"]}, - name="CeilometerAlarms.create_alarm_and_get_history", - platform="openstack") -class CreateAlarmAndGetHistory(ceiloutils.CeilometerScenario): - - def run(self, meter_name, threshold, state, timeout=60, **kwargs): - """Create an alarm, get and set the state and get the alarm history. - - This scenario makes following queries: - - * GET /v2/alarms/{alarm_id}/history - * GET /v2/alarms/{alarm_id}/state - * PUT /v2/alarms/{alarm_id}/state - - Initially alarm is created and then get the state of the created alarm - using its alarm_id. Then get the history of the alarm. And finally the - state of the alarm is updated using given state. meter_name and - threshold are required parameters for alarm creation. kwargs stores - other optional parameters like 'ok_actions', 'project_id' etc that may - be passed while alarm creation. 
- - :param meter_name: specifies meter name of the alarm - :param threshold: specifies alarm threshold - :param state: an alarm state to be set - :param timeout: The number of seconds for which to attempt a - successful check of the alarm state - :param kwargs: specifies optional arguments for alarm creation. - """ - alarm = self._create_alarm(meter_name, threshold, kwargs) - self._get_alarm_state(alarm.alarm_id) - self._get_alarm_history(alarm.alarm_id) - self._set_alarm_state(alarm, state, timeout) diff --git a/rally/plugins/openstack/scenarios/ceilometer/events.py b/rally/plugins/openstack/scenarios/ceilometer/events.py deleted file mode 100644 index 4ee4707f8d..0000000000 --- a/rally/plugins/openstack/scenarios/ceilometer/events.py +++ /dev/null @@ -1,97 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Scenarios for Ceilometer Events API. -""" - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.ceilometer import utils as cutils -from rally.plugins.openstack.scenarios.keystone import basic as kbasic -from rally.task import validation - - -# NOTE(idegtiarov): to work with event we need to create it, there are -# no other way except emit suitable notification from one of services, -# for example create new user in keystone. - -@validation.add("required_services", services=[consts.Service.CEILOMETER, - consts.Service.KEYSTONE]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["keystone"], - "cleanup@openstack": ["ceilometer"]}, - name="CeilometerEvents.create_user_and_list_events", - platform="openstack") -class CeilometerEventsCreateUserAndListEvents(cutils.CeilometerScenario, - kbasic.KeystoneBasic): - - def run(self): - """Create user and fetch all events. - - This scenario creates user to store new event and - fetches list of all events using GET /v2/events. - """ - self.admin_keystone.create_user() - events = self._list_events() - msg = ("Events list is empty, but it should include at least one " - "event about user creation") - self.assertTrue(events, msg) - - -@validation.add("required_services", services=[consts.Service.CEILOMETER, - consts.Service.KEYSTONE]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["keystone"], - "cleanup@openstack": ["ceilometer"]}, - name="CeilometerEvents.create_user_and_list_event_types", - platform="openstack") -class CeilometerEventsCreateUserAndListEventTypes(cutils.CeilometerScenario, - kbasic.KeystoneBasic): - - def run(self): - """Create user and fetch all event types. - - This scenario creates user to store new event and - fetches list of all events types using GET /v2/event_types. 
- """ - self.admin_keystone.create_user() - event_types = self._list_event_types() - msg = ("Event types list is empty, but it should include at least one" - " type about user creation") - self.assertTrue(event_types, msg) - - -@validation.add("required_services", services=[consts.Service.CEILOMETER, - consts.Service.KEYSTONE]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["keystone"], - "cleanup@openstack": ["ceilometer"]}, - name="CeilometerEvents.create_user_and_get_event", - platform="openstack") -class CeilometerEventsCreateUserAndGetEvent(cutils.CeilometerScenario, - kbasic.KeystoneBasic): - - def run(self): - """Create user and gets event. - - This scenario creates user to store new event and - fetches one event using GET /v2/events/. - """ - self.admin_keystone.create_user() - events = self._list_events() - msg = ("Events list is empty, but it should include at least one " - "event about user creation") - self.assertTrue(events, msg) - self._get_event(event_id=events[0].message_id) diff --git a/rally/plugins/openstack/scenarios/ceilometer/meters.py b/rally/plugins/openstack/scenarios/ceilometer/meters.py deleted file mode 100644 index 0552faf69a..0000000000 --- a/rally/plugins/openstack/scenarios/ceilometer/meters.py +++ /dev/null @@ -1,71 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.ceilometer import utils as ceiloutils -from rally.task import validation - - -"""Scenarios for Ceilometer Meters API.""" - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_contexts", contexts=("ceilometer")) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="CeilometerMeters.list_meters", platform="openstack") -class ListMeters(ceiloutils.CeilometerScenario): - - def run(self, metadata_query=None, limit=None): - """Check all available queries for list resource request. 
- - :param metadata_query: dict with metadata fields and values - :param limit: limit of meters in response - """ - - scenario = ListMatchedMeters(self.context) - scenario.run(filter_by_project_id=True) - scenario.run(filter_by_user_id=True) - scenario.run(filter_by_resource_id=True) - if metadata_query: - scenario.run(metadata_query=metadata_query) - if limit: - scenario.run(limit=limit) - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_contexts", contexts=("ceilometer")) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="CeilometerMeters.list_matched_meters", - platform="openstack") -class ListMatchedMeters(ceiloutils.CeilometerScenario): - - def run(self, filter_by_user_id=False, filter_by_project_id=False, - filter_by_resource_id=False, metadata_query=None, limit=None): - """Get meters that matched fields from context and args. - - :param filter_by_user_id: flag for query by user_id - :param filter_by_project_id: flag for query by project_id - :param filter_by_resource_id: flag for query by resource_id - :param metadata_query: dict with metadata fields and values for query - :param limit: count of resources in response - """ - - query = self._make_general_query(filter_by_project_id, - filter_by_user_id, - filter_by_resource_id, - metadata_query) - self._list_meters(query, limit) diff --git a/rally/plugins/openstack/scenarios/ceilometer/queries.py b/rally/plugins/openstack/scenarios/ceilometer/queries.py deleted file mode 100644 index 7a28fe8734..0000000000 --- a/rally/plugins/openstack/scenarios/ceilometer/queries.py +++ /dev/null @@ -1,112 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.ceilometer import utils as ceiloutils -from rally.task import validation - - -"""Scenarios for Ceilometer Queries API.""" - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["ceilometer"]}, - name="CeilometerQueries.create_and_query_alarms", - platform="openstack") -class CeilometerQueriesCreateAndQueryAlarms(ceiloutils.CeilometerScenario): - - def run(self, meter_name, threshold, filter=None, orderby=None, - limit=None, **kwargs): - """Create an alarm and then query it with specific parameters. - - This scenario tests POST /v2/query/alarms - An alarm is first created and then fetched using the input query. 
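(Editor's note: a hypothetical query, to make the parameters below concrete; the filter dictionary is serialized with json.dumps exactly as the code that follows does, and the orderby syntax is an assumption about the v2 complex-query API:

    filter = {"=": {"state": "ok"}}      # expression over alarm fields
    orderby = [{"timestamp": "desc"}]    # assumed ordering syntax
    limit = 10
)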
- - :param meter_name: specifies meter name of alarm - :param threshold: specifies alarm threshold - :param filter: optional filter query dictionary - :param orderby: optional param for specifying ordering of results - :param limit: optional param for maximum number of results returned - :param kwargs: optional parameters for alarm creation - """ - if filter: - filter = json.dumps(filter) - - self._create_alarm(meter_name, threshold, kwargs) - self._query_alarms(filter, orderby, limit) - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["ceilometer"]}, - name="CeilometerQueries.create_and_query_alarm_history", - platform="openstack") -class CeilometerQueriesCreateAndQueryAlarmHistory(ceiloutils - .CeilometerScenario): - - def run(self, meter_name, threshold, orderby=None, limit=None, **kwargs): - """Create an alarm and then query for its history. - - This scenario tests POST /v2/query/alarms/history - An alarm is first created and then its alarm_id is used to fetch the - history of that specific alarm. - - :param meter_name: specifies meter name of alarm - :param threshold: specifies alarm threshold - :param orderby: optional param for specifying ordering of results - :param limit: optional param for maximum number of results returned - :param kwargs: optional parameters for alarm creation - """ - alarm = self._create_alarm(meter_name, threshold, kwargs) - alarm_filter = json.dumps({"=": {"alarm_id": alarm.alarm_id}}) - self._query_alarm_history(alarm_filter, orderby, limit) - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["ceilometer"]}, - name="CeilometerQueries.create_and_query_samples", - platform="openstack") -class CeilometerQueriesCreateAndQuerySamples(ceiloutils.CeilometerScenario): - - def run(self, counter_name, counter_type, counter_unit, counter_volume, - resource_id, filter=None, orderby=None, limit=None, **kwargs): - """Create a sample and then query it with specific parameters. - - This scenario tests POST /v2/query/samples - A sample is first created and then fetched using the input query. - - :param counter_name: specifies name of the counter - :param counter_type: specifies type of the counter - :param counter_unit: specifies unit of the counter - :param counter_volume: specifies volume of the counter - :param resource_id: specifies resource id for the sample created - :param filter: optional filter query dictionary - :param orderby: optional param for specifying ordering of results - :param limit: optional param for maximum number of results returned - :param kwargs: parameters for sample creation - """ - self._create_sample(counter_name, counter_type, counter_unit, - counter_volume, resource_id, **kwargs) - - if filter: - filter = json.dumps(filter) - self._query_samples(filter, orderby, limit) diff --git a/rally/plugins/openstack/scenarios/ceilometer/resources.py b/rally/plugins/openstack/scenarios/ceilometer/resources.py deleted file mode 100644 index b6d9cde54c..0000000000 --- a/rally/plugins/openstack/scenarios/ceilometer/resources.py +++ /dev/null @@ -1,107 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.ceilometer import utils as ceiloutils -from rally.task import validation - - -"""Scenarios for Ceilometer Resource API.""" - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_contexts", contexts=("ceilometer")) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="CeilometerResource.list_resources", - platform="openstack") -class ListResources(ceiloutils.CeilometerScenario): - - def run(self, metadata_query=None, start_time=None, - end_time=None, limit=None): - """Check all available queries for list resource request. - - This scenario fetches list of all resources using GET /v2/resources. - - :param metadata_query: dict with metadata fields and values for query - :param start_time: lower bound of resource timestamp in isoformat - :param end_time: upper bound of resource timestamp in isoformat - :param limit: count of resources in response - """ - scenario = ListMatchedResources(self.context) - scenario.run(filter_by_project_id=True) - scenario.run(filter_by_user_id=True) - scenario.run(filter_by_resource_id=True) - if metadata_query: - scenario.run(metadata_query=metadata_query) - if start_time: - scenario.run(start_time=start_time) - if end_time: - scenario.run(end_time=end_time) - if start_time and end_time: - scenario.run(start_time=start_time, end_time=end_time) - if limit: - scenario.run(limit=limit) - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="CeilometerResource.get_tenant_resources", - platform="openstack") -class GetTenantResources(ceiloutils.CeilometerScenario): - - def run(self): - """Get all tenant resources. - - This scenario retrieves information about tenant resources using - GET /v2/resources/(resource_id) - """ - resources = self.context["tenant"].get("resources", []) - msg = ("No resources found for tenant: %s" - % self.context["tenant"].get("name")) - self.assertTrue(resources, msg) - for res_id in resources: - self._get_resource(res_id) - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_contexts", contexts=("ceilometer")) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="CeilometerResource.list_matched_resources", - platform="openstack") -class ListMatchedResources(ceiloutils.CeilometerScenario): - - def run(self, filter_by_user_id=False, filter_by_project_id=False, - filter_by_resource_id=False, metadata_query=None, start_time=None, - end_time=None, limit=None): - """Get resources that matched fields from context and args. 
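(Editor's note: illustrative arguments for the matched-resource queries below; timestamps are ISO 8601 strings as the docstrings state, and all values are hypothetical:

    # metadata_query={"status": "active"},
    # start_time="2023-01-01T00:00:00", end_time="2023-01-02T00:00:00",
    # limit=100
)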
- - :param filter_by_user_id: flag for query by user_id - :param filter_by_project_id: flag for query by project_id - :param filter_by_resource_id: flag for query by resource_id - :param metadata_query: dict with metadata fields and values for query - :param start_time: lower bound of resource timestamp in isoformat - :param end_time: upper bound of resource timestamp in isoformat - :param limit: count of resources in response - """ - - query = self._make_general_query(filter_by_project_id, - filter_by_user_id, - filter_by_resource_id, - metadata_query) - query += self._make_timestamp_query(start_time, end_time) - self._list_resources(query, limit) diff --git a/rally/plugins/openstack/scenarios/ceilometer/samples.py b/rally/plugins/openstack/scenarios/ceilometer/samples.py deleted file mode 100644 index 2f1f19d776..0000000000 --- a/rally/plugins/openstack/scenarios/ceilometer/samples.py +++ /dev/null @@ -1,71 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.ceilometer import utils as ceiloutils -from rally.task import validation - - -"""Scenarios for Ceilometer Samples API.""" - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_contexts", contexts=("ceilometer")) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="CeilometerSamples.list_matched_samples", - platform="openstack") -class ListMatchedSamples(ceiloutils.CeilometerScenario): - - def run(self, filter_by_resource_id=False, filter_by_project_id=False, - filter_by_user_id=False, metadata_query=None, limit=None): - """Get list of samples that matched fields from context and args. - - :param filter_by_user_id: flag for query by user_id - :param filter_by_project_id: flag for query by project_id - :param filter_by_resource_id: flag for query by resource_id - :param metadata_query: dict with metadata fields and values for query - :param limit: count of samples in response - """ - query = self._make_general_query(filter_by_project_id, - filter_by_user_id, - filter_by_resource_id, - metadata_query) - self._list_samples(query, limit) - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_contexts", contexts=("ceilometer")) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="CeilometerSamples.list_samples", - platform="openstack") -class ListSamples(ceiloutils.CeilometerScenario): - - def run(self, metadata_query=None, limit=None): - """Fetch all available queries for list sample request. 
- - :param metadata_query: dict with metadata fields and values for query - :param limit: count of samples in response - """ - - scenario = ListMatchedSamples(self.context) - scenario.run(filter_by_project_id=True) - scenario.run(filter_by_user_id=True) - scenario.run(filter_by_resource_id=True) - if metadata_query: - scenario.run(metadata_query=metadata_query) - if limit: - scenario.run(limit=limit) diff --git a/rally/plugins/openstack/scenarios/ceilometer/stats.py b/rally/plugins/openstack/scenarios/ceilometer/stats.py deleted file mode 100644 index ff8aedeb04..0000000000 --- a/rally/plugins/openstack/scenarios/ceilometer/stats.py +++ /dev/null @@ -1,76 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import logging -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.ceilometer import utils -from rally.task import validation - - -"""Scenarios for Ceilometer Stats API.""" - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="CeilometerStats.create_meter_and_get_stats", - platform="openstack") -class CreateMeterAndGetStats(utils.CeilometerScenario): - - @logging.log_deprecated("Use 'get_stats' method, now samples are created" - "in context", "0.1.2") - def run(self, **kwargs): - """Create a meter and fetch its statistics. - - Meter is first created and then statistics is fetched for the same - using GET /v2/meters/(meter_name)/statistics. - - :param kwargs: contains optional arguments to create a meter - """ - meter = self._create_meter(**kwargs) - self._get_stats(meter.counter_name) - - -@validation.add("required_services", - services=[consts.Service.CEILOMETER]) -@validation.add("required_contexts", contexts=("ceilometer")) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="CeilometerStats.get_stats", platform="openstack") -class GetStats(utils.CeilometerScenario): - - def run(self, meter_name, filter_by_user_id=False, - filter_by_project_id=False, filter_by_resource_id=False, - metadata_query=None, period=None, groupby=None, aggregates=None): - """Fetch statistics for certain meter. - - Statistics is fetched for the using - GET /v2/meters/(meter_name)/statistics. 
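(Editor's note: for example, a statistics request built from the parameters below might group samples per resource over five-minute periods; the values are hypothetical:

    # meter_name="cpu_util", filter_by_project_id=True,
    # period=300, groupby=["resource_id"]
)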
- - :param meter_name: meter to take statistic for - :param filter_by_user_id: flag for query by user_id - :param filter_by_project_id: flag for query by project_id - :param filter_by_resource_id: flag for query by resource_id - :param metadata_query: dict with metadata fields and values for query - :param period: the length of the time range covered by these stats - :param groupby: the fields used to group the samples - :param aggregates: name of function for samples aggregation - - :returns: list of statistics data - """ - query = self._make_general_query(filter_by_project_id, - filter_by_user_id, - filter_by_resource_id, - metadata_query) - self._get_stats(meter_name, query, period, groupby, aggregates) diff --git a/rally/plugins/openstack/scenarios/ceilometer/traits.py b/rally/plugins/openstack/scenarios/ceilometer/traits.py deleted file mode 100644 index 25d69b5912..0000000000 --- a/rally/plugins/openstack/scenarios/ceilometer/traits.py +++ /dev/null @@ -1,74 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.ceilometer import utils as cutils -from rally.plugins.openstack.scenarios.keystone import basic as kbasic -from rally.task import validation - - -"""Scenarios for Ceilometer Events API.""" - - -# NOTE(idegtiarov): to work with traits we need to create event firstly, -# there are no other way except emit suitable notification from one of -# services, for example create new user in keystone. - -@validation.add("required_services", services=[consts.Service.CEILOMETER, - consts.Service.KEYSTONE]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure( - context={"admin_cleanup@openstack": ["keystone"], - "cleanup@openstack": ["ceilometer"]}, - name="CeilometerTraits.create_user_and_list_traits", - platform="openstack") -class CreateUserAndListTraits(cutils.CeilometerScenario, - kbasic.KeystoneBasic): - - def run(self): - """Create user and fetch all event traits. - - This scenario creates user to store new event and - fetches list of all traits for certain event type and - trait name using GET /v2/event_types//traits/. - """ - self.admin_keystone.create_user() - event = self._list_events()[0] - trait_name = event.traits[0]["name"] - self._list_event_traits(event_type=event.event_type, - trait_name=trait_name) - - -@validation.add("required_services", services=[consts.Service.CEILOMETER, - consts.Service.KEYSTONE]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure( - context={"admin_cleanup@openstack": ["keystone"], - "cleanup@openstack": ["ceilometer"]}, - name="CeilometerTraits.create_user_and_list_trait_descriptions", - platform="openstack") -class CreateUserAndListTraitDescriptions( - cutils.CeilometerScenario, kbasic.KeystoneBasic): - - def run(self): - """Create user and fetch all trait descriptions. 
- - This scenario creates user to store new event and - fetches list of all traits for certain event type using - GET /v2/event_types//traits. - """ - self.admin_keystone.create_user() - event = self._list_events()[0] - self._list_event_trait_descriptions(event_type=event.event_type) diff --git a/rally/plugins/openstack/scenarios/ceilometer/utils.py b/rally/plugins/openstack/scenarios/ceilometer/utils.py deleted file mode 100644 index ff2ea7ee93..0000000000 --- a/rally/plugins/openstack/scenarios/ceilometer/utils.py +++ /dev/null @@ -1,467 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime as dt -import uuid - -import six - -from rally import exceptions -from rally.plugins.openstack import scenario -from rally.task import atomic -from rally.task import utils as bench_utils - - -class CeilometerScenario(scenario.OpenStackScenario): - """Base class for Ceilometer scenarios with basic atomic actions.""" - - def _make_samples(self, count=1, interval=0, counter_name="cpu_util", - counter_type="gauge", counter_unit="%", counter_volume=1, - project_id=None, user_id=None, source=None, - timestamp=None, metadata_list=None, batch_size=None): - """Prepare and return a list of samples. - - :param count: specifies number of samples in array - :param interval: specifies interval between timestamps of near-by - samples - :param counter_name: specifies name of the counter - :param counter_type: specifies type of the counter - :param counter_unit: specifies unit of the counter - :param counter_volume: specifies volume of the counter - :param project_id: specifies project id for samples - :param user_id: specifies user id for samples - :param source: specifies source for samples - :param timestamp: specifies timestamp for samples - :param metadata_list: specifies list of resource metadata - :param batch_size: specifies number of samples to store in one query - :returns: generator that produces lists of samples - """ - batch_size = batch_size or count - sample = { - "counter_name": counter_name, - "counter_type": counter_type, - "counter_unit": counter_unit, - "counter_volume": counter_volume, - "resource_id": str(uuid.uuid4()) - } - opt_fields = { - "project_id": project_id, - "user_id": user_id, - "source": source, - "timestamp": timestamp, - } - for k, v in opt_fields.items(): - if v: - sample.update({k: v}) - len_meta = len(metadata_list) if metadata_list else 0 - now = timestamp or dt.datetime.utcnow() - samples = [] - for i in six.moves.xrange(count): - if i and not (i % batch_size): - yield samples - samples = [] - sample_item = dict(sample) - sample_item["timestamp"] = ( - now - dt.timedelta(seconds=(interval * i)) - ).isoformat() - if metadata_list: - # NOTE(idegtiarov): Adding more than one template of metadata - # required it's proportional distribution among whole samples. 
- sample_item["resource_metadata"] = metadata_list[ - i * len_meta // count - ] - samples.append(sample_item) - yield samples - - def _make_query_item(self, field, op="eq", value=None): - """Create a SimpleQuery item for requests. - - :param field: filtered field - :param op: operator for filtering - :param value: matched value - - :returns: dict with field, op and value keys for query - """ - return {"field": field, "op": op, "value": value} - - def _make_general_query(self, filter_by_project_id=None, - filter_by_user_id=None, - filter_by_resource_id=None, - metadata_query=None): - """Create a SimpleQuery used by samples list API. - - :param filter_by_project_id: add a project id to query - :param filter_by_user_id: add a user id to query - :param filter_by_resource_id: add a resource id to query - :param metadata_query: metadata dict that will add to query - - :returns: SimpleQuery with specified items - - """ - query = [] - metadata_query = metadata_query or {} - - if filter_by_user_id: - query.append(self._make_query_item("user_id", "eq", - self.context["user"]["id"])) - if filter_by_project_id: - query.append(self._make_query_item( - "project_id", "eq", self.context["tenant"]["id"])) - if filter_by_resource_id: - query.append(self._make_query_item( - "resource_id", "eq", self.context["tenant"]["resources"][0])) - - for key, value in metadata_query.items(): - query.append(self._make_query_item("metadata.%s" % key, - value=value)) - return query - - def _make_timestamp_query(self, start_time=None, end_time=None): - """Create ceilometer query for timestamp range. - - :param start_time: start datetime in isoformat - :param end_time: end datetime in isoformat - :returns: query with timestamp range - """ - query = [] - if end_time and start_time and end_time < start_time: - msg = "End time should be great or equal than start time" - raise exceptions.InvalidArgumentsException(msg) - if start_time: - query.append(self._make_query_item("timestamp", ">=", start_time)) - if end_time: - query.append(self._make_query_item("timestamp", "<=", end_time)) - return query - - def _make_profiler_key(self, method, query=None, limit=None): - """Create key for profiling method with query. - - :param method: Original profiler tag for method - :param query: ceilometer query which fields will be added to key - :param limit: if it exists `limit` will be added to key - :returns: profiler key that includes method and queried fields - """ - query = query or [] - limit_line = limit and "limit" or "" - fields_line = "&".join("%s" % a["field"] for a in query) - key_identifiers = "&".join(x for x in (limit_line, fields_line) if x) - key = ":".join(x for x in (method, key_identifiers) if x) - return key - - def _get_alarm_dict(self, **kwargs): - """Prepare and return an alarm dict for creating an alarm. - - :param kwargs: optional parameters to create alarm - :returns: alarm dictionary used to create an alarm - """ - alarm_id = self.generate_random_name() - alarm = {"alarm_id": alarm_id, - "name": alarm_id, - "description": "Test Alarm"} - - alarm.update(kwargs) - return alarm - - @atomic.action_timer("ceilometer.list_alarms") - def _list_alarms(self, alarm_id=None): - """List alarms. - - List alarm matching alarm_id. It fetches all alarms - if alarm_id is None. 
- - :param alarm_id: specifies id of the alarm - :returns: list of alarms - """ - if alarm_id: - return self.clients("ceilometer").alarms.get(alarm_id) - else: - return self.clients("ceilometer").alarms.list() - - @atomic.action_timer("ceilometer.get_alarm") - def _get_alarm(self, alarm_id): - """Get detailed information of an alarm. - - :param alarm_id: Specifies id of the alarm - :returns: If alarm_id is existed and correct, returns - detailed information of an alarm, else returns None - """ - return self.clients("ceilometer").alarms.get(alarm_id) - - @atomic.action_timer("ceilometer.create_alarm") - def _create_alarm(self, meter_name, threshold, kwargs): - """Create an alarm. - - :param meter_name: specifies meter name of the alarm - :param threshold: specifies alarm threshold - :param kwargs: contains optional features of alarm to be created - :returns: alarm - """ - alarm_dict = self._get_alarm_dict(**kwargs) - alarm_dict.update({"meter_name": meter_name, - "threshold": threshold}) - alarm = self.clients("ceilometer").alarms.create(**alarm_dict) - return alarm - - @atomic.action_timer("ceilometer.delete_alarm") - def _delete_alarm(self, alarm_id): - """Delete an alarm. - - :param alarm_id: specifies id of the alarm - """ - self.clients("ceilometer").alarms.delete(alarm_id) - - @atomic.action_timer("ceilometer.update_alarm") - def _update_alarm(self, alarm_id, alarm_dict_delta): - """Update an alarm. - - :param alarm_id: specifies id of the alarm - :param alarm_dict_delta: features of alarm to be updated - """ - self.clients("ceilometer").alarms.update(alarm_id, **alarm_dict_delta) - - @atomic.action_timer("ceilometer.get_alarm_history") - def _get_alarm_history(self, alarm_id): - """Assemble the alarm history requested. - - :param alarm_id: specifies id of the alarm - :returns: list of alarm changes - """ - return self.clients("ceilometer").alarms.get_history(alarm_id) - - @atomic.action_timer("ceilometer.get_alarm_state") - def _get_alarm_state(self, alarm_id): - """Get the state of the alarm. - - :param alarm_id: specifies id of the alarm - :returns: state of the alarm - """ - return self.clients("ceilometer").alarms.get_state(alarm_id) - - @atomic.action_timer("ceilometer.set_alarm_state") - def _set_alarm_state(self, alarm, state, timeout): - """Set the state of the alarm. - - :param alarm: alarm instance - :param state: an alarm state to be set - :param timeout: The number of seconds for which to attempt a - successful check of the alarm state. - :returns: alarm in the set state - """ - self.clients("ceilometer").alarms.set_state(alarm.alarm_id, state) - return bench_utils.wait_for_status(alarm, - ready_statuses=[state], - update_resource=bench_utils - .get_from_manager(), - timeout=timeout, check_interval=1) - - @atomic.action_timer("ceilometer.list_events") - def _list_events(self): - """Get list of user's events. - - It fetches all events. - :returns: list of events - """ - return self.admin_clients("ceilometer").events.list() - - @atomic.action_timer("ceilometer.get_event") - def _get_event(self, event_id): - """Get event with specific id. - - Get event matching event_id. - - :param event_id: specifies id of the event - :returns: event - """ - return self.admin_clients("ceilometer").events.get(event_id) - - @atomic.action_timer("ceilometer.list_event_types") - def _list_event_types(self): - """Get list of all event types. 
- - :returns: list of event types - """ - return self.admin_clients("ceilometer").event_types.list() - - @atomic.action_timer("ceilometer.list_event_traits") - def _list_event_traits(self, event_type, trait_name): - """Get list of event traits. - - :param event_type: specifies the type of event - :param trait_name: specifies trait name - :returns: list of event traits - """ - return self.admin_clients("ceilometer").traits.list(event_type, - trait_name) - - @atomic.action_timer("ceilometer.list_event_trait_descriptions") - def _list_event_trait_descriptions(self, event_type): - """Get list of event trait descriptions. - - :param event_type: specifies the type of event - :returns: list of event trait descriptions - """ - return self.admin_clients("ceilometer").trait_descriptions.list( - event_type) - - def _list_samples(self, query=None, limit=None): - """List all Samples. - - :param query: optional param that specify query - :param limit: optional param for maximum number of samples returned - :returns: list of samples - """ - key = self._make_profiler_key("ceilometer.list_samples", query, - limit) - with atomic.ActionTimer(self, key): - return self.clients("ceilometer").new_samples.list(q=query, - limit=limit) - - @atomic.action_timer("ceilometer.get_resource") - def _get_resource(self, resource_id): - """Retrieve details about one resource.""" - return self.clients("ceilometer").resources.get(resource_id) - - @atomic.action_timer("ceilometer.get_stats") - def _get_stats(self, meter_name, query=None, period=None, groupby=None, - aggregates=None): - """Get stats for a specific meter. - - :param meter_name: Name of ceilometer meter - :param query: list of queries - :param period: the length of the time range covered by these stats - :param groupby: the fields used to group the samples - :param aggregates: function for samples aggregation - - :returns: list of statistics data - """ - return self.clients("ceilometer").statistics.list(meter_name, q=query, - period=period, - groupby=groupby, - aggregates=aggregates - ) - - @atomic.action_timer("ceilometer.create_meter") - def _create_meter(self, **kwargs): - """Create a new meter. - - :param kwargs: Contains the optional attributes for meter creation - :returns: Newly created meter - """ - name = self.generate_random_name() - samples = self.clients("ceilometer").samples.create( - counter_name=name, **kwargs) - return samples[0] - - @atomic.action_timer("ceilometer.query_alarms") - def _query_alarms(self, filter, orderby, limit): - """Query alarms with specific parameters. - - If no input params are provided, it returns all the results - in the database. - - :param limit: optional param for maximum number of results returned - :param orderby: optional param for specifying ordering of results - :param filter: optional filter query - :returns: queried alarms - """ - return self.clients("ceilometer").query_alarms.query( - filter, orderby, limit) - - @atomic.action_timer("ceilometer.query_alarm_history") - def _query_alarm_history(self, filter, orderby, limit): - """Query history of an alarm. - - If no input params are provided, it returns all the results - in the database. 
- - :param limit: optional param for maximum number of results returned - :param orderby: optional param for specifying ordering of results - :param filter: optional filter query - :returns: alarm history - """ - return self.clients("ceilometer").query_alarm_history.query( - filter, orderby, limit) - - @atomic.action_timer("ceilometer.create_sample") - def _create_sample(self, counter_name, counter_type, counter_unit, - counter_volume, resource_id=None, **kwargs): - """Create a Sample with specified parameters. - - :param counter_name: specifies name of the counter - :param counter_type: specifies type of the counter - :param counter_unit: specifies unit of the counter - :param counter_volume: specifies volume of the counter - :param resource_id: specifies resource id for the sample created - :param kwargs: contains optional parameters for creating a sample - :returns: created sample - """ - kwargs.update({"counter_name": counter_name, - "counter_type": counter_type, - "counter_unit": counter_unit, - "counter_volume": counter_volume, - "resource_id": resource_id if resource_id - else self.generate_random_name()}) - return self.clients("ceilometer").samples.create(**kwargs) - - @atomic.action_timer("ceilometer.create_samples") - def _create_samples(self, samples): - """Create Samples with specified parameters. - - :param samples: a list of samples to create - :returns: created list samples - """ - return self.clients("ceilometer").samples.create_list(samples) - - @atomic.action_timer("ceilometer.query_samples") - def _query_samples(self, filter, orderby, limit): - """Query samples with specified parameters. - - If no input params are provided, it returns all the results - in the database. - - :param limit: optional param for maximum number of results returned - :param orderby: optional param for specifying ordering of results - :param filter: optional filter query - :returns: queried samples - """ - return self.clients("ceilometer").query_samples.query( - filter, orderby, limit) - - def _list_resources(self, query=None, limit=None): - """List all resources. - - :param query: query list for Ceilometer api - :param limit: count of returned resources - :returns: list of all resources - """ - - key = self._make_profiler_key("ceilometer.list_resources", query, - limit) - with atomic.ActionTimer(self, key): - return self.clients("ceilometer").resources.list(q=query, - limit=limit) - - def _list_meters(self, query=None, limit=None): - """Get list of user's meters. - - :param query: query list for Ceilometer api - :param limit: count of returned meters - :returns: list of all meters - """ - - key = self._make_profiler_key("ceilometer.list_meters", query, - limit) - with atomic.ActionTimer(self, key): - return self.clients("ceilometer").meters.list(q=query, - limit=limit) diff --git a/rally/plugins/openstack/scenarios/cinder/__init__.py b/rally/plugins/openstack/scenarios/cinder/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/scenarios/cinder/qos_specs.py b/rally/plugins/openstack/scenarios/cinder/qos_specs.py deleted file mode 100644 index bcbb72e4c0..0000000000 --- a/rally/plugins/openstack/scenarios/cinder/qos_specs.py +++ /dev/null @@ -1,134 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.cinder import utils as cinder_utils -from rally.task import validation - - -"""Scenarios for Cinder QoS.""" - - -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["cinder"]}, - name="CinderQos.create_and_list_qos", platform="openstack") -class CreateAndListQos(cinder_utils.CinderBasic): - def run(self, consumer, write_iops_sec, read_iops_sec): - """Create a qos, then list all qos. - - :param consumer: Consumer behavior - :param write_iops_sec: random write limitation - :param read_iops_sec: random read limitation - """ - specs = { - "consumer": consumer, - "write_iops_sec": write_iops_sec, - "read_iops_sec": read_iops_sec - } - - qos = self.admin_cinder.create_qos(specs) - - pool_list = self.admin_cinder.list_qos() - msg = ("Qos not included into list of available qos\n" - "created qos:{}\n" - "Pool of qos:{}").format(qos, pool_list) - self.assertIn(qos, pool_list, err_msg=msg) - - -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["cinder"]}, - name="CinderQos.create_and_get_qos", platform="openstack") -class CreateAndGetQos(cinder_utils.CinderBasic): - def run(self, consumer, write_iops_sec, read_iops_sec): - """Create a qos, then get details of the qos. - - :param consumer: Consumer behavior - :param write_iops_sec: random write limitation - :param read_iops_sec: random read limitation - """ - specs = { - "consumer": consumer, - "write_iops_sec": write_iops_sec, - "read_iops_sec": read_iops_sec - } - - qos = self.admin_cinder.create_qos(specs) - self.admin_cinder.get_qos(qos.id) - - -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["cinder"]}, - name="CinderQos.create_and_set_qos", platform="openstack") -class CreateAndSetQos(cinder_utils.CinderBasic): - def run(self, consumer, write_iops_sec, read_iops_sec, - set_consumer, set_write_iops_sec, set_read_iops_sec): - """Create a qos, then Add/Update keys in qos specs. 
- - :param consumer: Consumer behavior - :param write_iops_sec: random write limitation - :param read_iops_sec: random read limitation - :param set_consumer: update Consumer behavior - :param set_write_iops_sec: update random write limitation - :param set_read_iops_sec: update random read limitation - """ - create_specs = { - "consumer": consumer, - "write_iops_sec": write_iops_sec, - "read_iops_sec": read_iops_sec - } - set_specs = { - "consumer": set_consumer, - "write_iops_sec": set_write_iops_sec, - "read_iops_sec": set_read_iops_sec - } - - qos = self.admin_cinder.create_qos(create_specs) - self.admin_cinder.set_qos(qos=qos, set_specs_args=set_specs) - - -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", admin=True) -@validation.add("required_contexts", contexts=("volume_types")) -@scenario.configure( - context={"admin_cleanup@openstack": ["cinder"]}, - name="CinderQos.create_qos_associate_and_disassociate_type", - platform="openstack") -class CreateQosAssociateAndDisassociateType(cinder_utils.CinderBasic): - def run(self, consumer, write_iops_sec, read_iops_sec): - """Create a qos, Associate and Disassociate the qos from volume type. - - :param consumer: Consumer behavior - :param write_iops_sec: random write limitation - :param read_iops_sec: random read limitation - """ - specs = { - "consumer": consumer, - "write_iops_sec": write_iops_sec, - "read_iops_sec": read_iops_sec - } - - qos = self.admin_cinder.create_qos(specs) - - vt_idx = self.context["iteration"] % len(self.context["volume_types"]) - volume_type = self.context["volume_types"][vt_idx] - - self.admin_cinder.qos_associate_type(qos_specs=qos, - volume_type=volume_type["id"]) - - self.admin_cinder.qos_disassociate_type(qos_specs=qos, - volume_type=volume_type["id"]) diff --git a/rally/plugins/openstack/scenarios/cinder/utils.py b/rally/plugins/openstack/scenarios/cinder/utils.py deleted file mode 100644 index 7e2c4f87d2..0000000000 --- a/rally/plugins/openstack/scenarios/cinder/utils.py +++ /dev/null @@ -1,505 +0,0 @@ -# Copyright 2013 Huawei Technologies Co.,LTD. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import random - -from rally.common import cfg -from rally.common import logging -from rally import exceptions -from rally.plugins.openstack import scenario -from rally.plugins.openstack.services.storage import block -from rally.plugins.openstack.wrappers import cinder as cinder_wrapper -from rally.plugins.openstack.wrappers import glance as glance_wrapper -from rally.task import atomic -from rally.task import utils as bench_utils - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -class CinderBasic(scenario.OpenStackScenario): - def __init__(self, context=None, admin_clients=None, clients=None): - super(CinderBasic, self).__init__(context, admin_clients, clients) - if hasattr(self, "_admin_clients"): - self.admin_cinder = block.BlockStorage( - self._admin_clients, name_generator=self.generate_random_name, - atomic_inst=self.atomic_actions()) - if hasattr(self, "_clients"): - self.cinder = block.BlockStorage( - self._clients, name_generator=self.generate_random_name, - atomic_inst=self.atomic_actions()) - - def get_random_server(self): - server_id = random.choice(self.context["tenant"]["servers"]) - return self.clients("nova").servers.get(server_id) - - -class CinderScenario(scenario.OpenStackScenario): - """Base class for Cinder scenarios with basic atomic actions.""" - - def __init__(self, context=None, admin_clients=None, clients=None): - super(CinderScenario, self).__init__(context, admin_clients, clients) - LOG.warning( - "Class %s is deprecated since Rally 0.10.0 and will be removed " - "soon. Use " - "rally.plugins.openstack.services.storage.block.BlockStorage " - "instead." % self.__class__) - - @atomic.action_timer("cinder.list_volumes") - def _list_volumes(self, detailed=True): - """Returns user volumes list.""" - - return self.clients("cinder").volumes.list(detailed) - - @atomic.action_timer("cinder.get_volume") - def _get_volume(self, volume_id): - """get volume detailed information. - - :param volume_id: id of volume - :returns: class:`Volume` - """ - return self.clients("cinder").volumes.get(volume_id) - - @atomic.action_timer("cinder.list_snapshots") - def _list_snapshots(self, detailed=True): - """Returns user snapshots list.""" - - return self.clients("cinder").volume_snapshots.list(detailed) - - @atomic.action_timer("cinder.list_types") - def _list_types(self, search_opts=None, is_public=None): - """Lists all volume types. - - :param search_opts: Options used when search for volume types - :param is_public: If query public volume type - :returns: A list of volume types - """ - return self.clients("cinder").volume_types.list(search_opts, - is_public) - - def _set_metadata(self, volume, sets=10, set_size=3): - """Set volume metadata. - - :param volume: The volume to set metadata on - :param sets: how many operations to perform - :param set_size: number of metadata keys to set in each operation - :returns: A list of keys that were set - """ - key = "cinder.set_%s_metadatas_%s_times" % (set_size, sets) - with atomic.ActionTimer(self, key): - keys = [] - for i in range(sets): - metadata = {} - for j in range(set_size): - key = self.generate_random_name() - keys.append(key) - metadata[key] = self.generate_random_name() - - self.clients("cinder").volumes.set_metadata(volume, metadata) - return keys - - def _delete_metadata(self, volume, keys, deletes=10, delete_size=3): - """Delete volume metadata keys. - - Note that ``len(keys)`` must be greater than or equal to - ``deletes * delete_size``. 
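The note above translates into a simple precondition plus disjoint slicing; a few lines of standalone Python make the arithmetic concrete (the key list is a stand-in for keys returned by the earlier set step):

keys = ["key-%d" % i for i in range(30)]   # stand-in for previously set metadata keys
deletes, delete_size = 10, 3

# Same guard as the scenario: there must be enough keys to fill every delete batch.
if len(keys) < deletes * delete_size:
    raise ValueError("not enough metadata keys: have %d, need %d"
                     % (len(keys), deletes * delete_size))

# Each operation removes a disjoint slice of `delete_size` keys.
for i in range(deletes):
    print("delete", keys[i * delete_size:(i + 1) * delete_size])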
- - :param volume: The volume to delete metadata from - :param deletes: how many operations to perform - :param delete_size: number of metadata keys to delete in each operation - :param keys: a list of keys to choose deletion candidates from - """ - if len(keys) < deletes * delete_size: - raise exceptions.InvalidArgumentsException( - "Not enough metadata keys to delete: " - "%(num_keys)s keys, but asked to delete %(num_deletes)s" % - {"num_keys": len(keys), - "num_deletes": deletes * delete_size}) - # make a shallow copy of the list of keys so that, when we pop - # from it later, we don't modify the original list. - keys = list(keys) - random.shuffle(keys) - action_name = "cinder.delete_%s_metadatas_%s_times" % (delete_size, - deletes) - with atomic.ActionTimer(self, action_name): - for i in range(deletes): - to_del = keys[i * delete_size:(i + 1) * delete_size] - self.clients("cinder").volumes.delete_metadata(volume, to_del) - - @atomic.action_timer("cinder.create_volume") - def _create_volume(self, size, **kwargs): - """Create one volume. - - Returns when the volume is actually created and is in the "Available" - state. - - :param size: int be size of volume in GB, or - dictionary, must contain two values: - min - minimum size volumes will be created as; - max - maximum size volumes will be created as. - :param kwargs: Other optional parameters to initialize the volume - :returns: Created volume object - """ - if isinstance(size, dict): - size = random.randint(size["min"], size["max"]) - - client = cinder_wrapper.wrap(self._clients.cinder, self) - volume = client.create_volume(size, **kwargs) - - # NOTE(msdubov): It is reasonable to wait 5 secs before starting to - # check whether the volume is ready => less API calls. - self.sleep_between(CONF.openstack.cinder_volume_create_prepoll_delay) - - volume = bench_utils.wait_for_status( - volume, - ready_statuses=["available"], - update_resource=bench_utils.get_from_manager(), - timeout=CONF.openstack.cinder_volume_create_timeout, - check_interval=CONF.openstack.cinder_volume_create_poll_interval - ) - return volume - - @atomic.action_timer("cinder.update_volume") - def _update_volume(self, volume, **update_volume_args): - """Update name and description for this volume - - This atomic function updates volume information. The volume - display name is always changed, and additional update - arguments may also be specified. - - :param volume: volume object - :param update_volume_args: dict, contains values to be updated. - """ - client = cinder_wrapper.wrap(self._clients.cinder, self) - client.update_volume(volume, **update_volume_args) - - @atomic.action_timer("cinder.update_readonly_flag") - def _update_readonly_flag(self, volume, read_only): - """Update the read-only access mode flag of the specified volume. - - :param volume: The UUID of the volume to update. - :param read_only: The value to indicate whether to update volume to - read-only access mode. - :returns: A tuple of http Response and body - """ - return self.clients("cinder").volumes.update_readonly_flag( - volume, read_only) - - @atomic.action_timer("cinder.delete_volume") - def _delete_volume(self, volume): - """Delete the given volume. - - Returns when the volume is actually deleted. 
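The volume create/extend helpers accept either a fixed integer size in GB or a {min, max} range; a tiny sketch of that convention, with an illustrative function name:

import random

def resolve_size(size):
    # int -> use as-is; dict -> pick a random size within the inclusive bounds,
    # mirroring the size handling in the removed create/extend helpers.
    if isinstance(size, dict):
        return random.randint(size["min"], size["max"])
    return size

print(resolve_size(1))
print(resolve_size({"min": 1, "max": 5}))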
- - :param volume: volume object - """ - volume.delete() - bench_utils.wait_for_status( - volume, - ready_statuses=["deleted"], - check_deletion=True, - update_resource=bench_utils.get_from_manager(), - timeout=CONF.openstack.cinder_volume_delete_timeout, - check_interval=CONF.openstack.cinder_volume_delete_poll_interval - ) - - @atomic.action_timer("cinder.extend_volume") - def _extend_volume(self, volume, new_size): - """Extend the given volume. - - Returns when the volume is actually extended. - - :param volume: volume object - :param new_size: new volume size in GB, or - dictionary, must contain two values: - min - minimum size volumes will be created as; - max - maximum size volumes will be created as. - Notice: should be bigger volume size - """ - - if isinstance(new_size, dict): - new_size = random.randint(new_size["min"], new_size["max"]) - - volume.extend(volume, new_size) - volume = bench_utils.wait_for_status( - volume, - ready_statuses=["available"], - update_resource=bench_utils.get_from_manager(), - timeout=CONF.openstack.cinder_volume_create_timeout, - check_interval=CONF.openstack.cinder_volume_create_poll_interval - ) - - @atomic.action_timer("cinder.upload_volume_to_image") - def _upload_volume_to_image(self, volume, force=False, - container_format="bare", disk_format="raw"): - """Upload the given volume to image. - - Returns created image. - - :param volume: volume object - :param force: flag to indicate whether to snapshot a volume even if - it's attached to an instance - :param container_format: container format of image. Acceptable - formats: ami, ari, aki, bare, and ovf - :param disk_format: disk format of image. Acceptable formats: - ami, ari, aki, vhd, vmdk, raw, qcow2, vdi and iso - :returns: Returns created image object - """ - resp, img = volume.upload_to_image(force, self.generate_random_name(), - container_format, disk_format) - # NOTE (e0ne): upload_to_image changes volume status to uploading so - # we need to wait until it will be available. - volume = bench_utils.wait_for_status( - volume, - ready_statuses=["available"], - update_resource=bench_utils.get_from_manager(), - timeout=CONF.openstack.cinder_volume_create_timeout, - check_interval=CONF.openstack.cinder_volume_create_poll_interval - ) - image_id = img["os-volume_upload_image"]["image_id"] - image = self.clients("glance").images.get(image_id) - wrapper = glance_wrapper.wrap(self._clients.glance, self) - image = bench_utils.wait_for_status( - image, - ready_statuses=["active"], - update_resource=wrapper.get_image, - timeout=CONF.openstack.glance_image_create_timeout, - check_interval=CONF.openstack.glance_image_create_poll_interval - ) - - return image - - @atomic.action_timer("cinder.create_snapshot") - def _create_snapshot(self, volume_id, force=False, **kwargs): - """Create one snapshot. - - Returns when the snapshot is actually created and is in the "Available" - state. 
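These create/delete helpers all block on a status poll. A simplified, self-contained stand-in for that wait loop, assuming only a refresh callable that returns an object with a status attribute (the real code relies on rally.task.utils.wait_for_status):

import time

def wait_for_status(refresh, ready_statuses, timeout=600, check_interval=1):
    # Poll refresh() until the resource reports one of ready_statuses,
    # or fail once the timeout is exceeded.
    deadline = time.time() + timeout
    while time.time() < deadline:
        resource = refresh()
        if getattr(resource, "status", None) in ready_statuses:
            return resource
        time.sleep(check_interval)
    raise TimeoutError("resource did not reach %s within %s seconds"
                       % (ready_statuses, timeout))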
- - :param volume_id: volume uuid for creating snapshot - :param force: flag to indicate whether to snapshot a volume even if - it's attached to an instance - :param kwargs: Other optional parameters to initialize the volume - :returns: Created snapshot object - """ - kwargs["force"] = force - - client = cinder_wrapper.wrap(self._clients.cinder, self) - snapshot = client.create_snapshot(volume_id, **kwargs) - - self.sleep_between(CONF.openstack.cinder_volume_create_prepoll_delay) - snapshot = bench_utils.wait_for_status( - snapshot, - ready_statuses=["available"], - update_resource=bench_utils.get_from_manager(), - timeout=CONF.openstack.cinder_volume_create_timeout, - check_interval=CONF.openstack.cinder_volume_create_poll_interval - ) - return snapshot - - @atomic.action_timer("cinder.delete_snapshot") - def _delete_snapshot(self, snapshot): - """Delete the given snapshot. - - Returns when the snapshot is actually deleted. - - :param snapshot: snapshot object - """ - snapshot.delete() - bench_utils.wait_for_status( - snapshot, - ready_statuses=["deleted"], - check_deletion=True, - update_resource=bench_utils.get_from_manager(), - timeout=CONF.openstack.cinder_volume_delete_timeout, - check_interval=CONF.openstack.cinder_volume_delete_poll_interval - ) - - @atomic.action_timer("cinder.create_backup") - def _create_backup(self, volume_id, **kwargs): - """Create a volume backup of the given volume. - - :param volume_id: The ID of the volume to backup. - :param kwargs: Other optional parameters - """ - backup = self.clients("cinder").backups.create(volume_id, **kwargs) - return bench_utils.wait_for_status( - backup, - ready_statuses=["available"], - update_resource=bench_utils.get_from_manager(), - timeout=CONF.openstack.cinder_volume_create_timeout, - check_interval=CONF.openstack.cinder_volume_create_poll_interval - ) - - @atomic.action_timer("cinder.delete_backup") - def _delete_backup(self, backup): - """Delete the given backup. - - Returns when the backup is actually deleted. - - :param backup: backup instance - """ - backup.delete() - bench_utils.wait_for_status( - backup, - ready_statuses=["deleted"], - check_deletion=True, - update_resource=bench_utils.get_from_manager(), - timeout=CONF.openstack.cinder_volume_delete_timeout, - check_interval=CONF.openstack.cinder_volume_delete_poll_interval - ) - - @atomic.action_timer("cinder.restore_backup") - def _restore_backup(self, backup_id, volume_id=None): - """Restore the given backup. - - :param backup_id: The ID of the backup to restore. - :param volume_id: The ID of the volume to restore the backup to. - """ - restore = self.clients("cinder").restores.restore(backup_id, volume_id) - restored_volume = self.clients("cinder").volumes.get(restore.volume_id) - backup_for_restore = self.clients("cinder").backups.get(backup_id) - bench_utils.wait_for_status( - backup_for_restore, - ready_statuses=["available"], - update_resource=bench_utils.get_from_manager(), - timeout=CONF.openstack.cinder_backup_restore_timeout, - check_interval=CONF.openstack.cinder_backup_restore_poll_interval - ) - return bench_utils.wait_for_status( - restored_volume, - ready_statuses=["available"], - update_resource=bench_utils.get_from_manager(), - timeout=CONF.openstack.cinder_volume_create_timeout, - check_interval=CONF.openstack.cinder_volume_create_poll_interval - ) - - @atomic.action_timer("cinder.list_backups") - def _list_backups(self, detailed=True): - """Return user volume backups list. 
- - :param detailed: True if detailed information about backup - should be listed - """ - return self.clients("cinder").backups.list(detailed) - - def get_random_server(self): - server_id = random.choice(self.context["tenant"]["servers"]) - return self.clients("nova").servers.get(server_id) - - @atomic.action_timer("cinder.list_transfers") - def _list_transfers(self, detailed=True, search_opts=None): - """Get a list of all volume transfers. - - :param detailed: If True, detailed information about transfer - should be listed - :param search_opts: Search options to filter out volume transfers - :returns: list of :class:`VolumeTransfer` - """ - return self.clients("cinder").transfers.list(detailed, search_opts) - - @atomic.action_timer("cinder.create_volume_type") - def _create_volume_type(self, **kwargs): - """create volume type. - - :param kwargs: Optional additional arguments for volume type creation - :returns: VolumeType object - """ - kwargs["name"] = self.generate_random_name() - return self.admin_clients("cinder").volume_types.create(**kwargs) - - @atomic.action_timer("cinder.delete_volume_type") - def _delete_volume_type(self, volume_type): - """delete a volume type. - - :param volume_type: Name or Id of the volume type - :returns: base on client response return True if the request - has been accepted or not - """ - tuple_res = self.admin_clients("cinder").volume_types.delete( - volume_type) - return (tuple_res[0].status_code == 202) - - @atomic.action_timer("cinder.set_volume_type_keys") - def _set_volume_type_keys(self, volume_type, metadata): - """Set extra specs on a volume type. - - :param volume_type: The :class:`VolumeType` to set extra spec on - :param metadata: A dict of key/value pairs to be set - :returns: extra_specs if the request has been accepted - """ - return volume_type.set_keys(metadata) - - @atomic.action_timer("cinder.get_volume_type") - def _get_volume_type(self, volume_type): - """get details of volume_type. - - :param volume_type: The ID of the :class:`VolumeType` to get - :rtype: :class:`VolumeType` - """ - return self.admin_clients("cinder").volume_types.get(volume_type) - - @atomic.action_timer("cinder.transfer_create") - def _transfer_create(self, volume_id): - """Create a volume transfer. - - :param volume_id: The ID of the volume to transfer - :rtype: VolumeTransfer - """ - name = self.generate_random_name() - return self.clients("cinder").transfers.create(volume_id, name) - - @atomic.action_timer("cinder.transfer_accept") - def _transfer_accept(self, transfer_id, auth_key): - """Accept a volume transfer. - - :param transfer_id: The ID of the transfer to accept. - :param auth_key: The auth_key of the transfer. - :rtype: VolumeTransfer - """ - return self.clients("cinder").transfers.accept(transfer_id, auth_key) - - @atomic.action_timer("cinder.create_encryption_type") - def _create_encryption_type(self, volume_type, specs): - """Create encryption type for a volume type. Default: admin only. - - :param volume_type: the volume type on which to add an encryption type - :param specs: the encryption type specifications to add - :return: an instance of :class: VolumeEncryptionType - """ - return self.admin_clients("cinder").volume_encryption_types.create( - volume_type, specs) - - @atomic.action_timer("cinder.list_encryption_type") - def _list_encryption_type(self, search_opts=None): - """List all volume encryption types. 
- - :param search_opts: Options used when search for encryption types - :return: a list of :class: VolumeEncryptionType instances - """ - return self.admin_clients("cinder").volume_encryption_types.list( - search_opts) - - @atomic.action_timer("cinder.delete_encryption_type") - def _delete_encryption_type(self, volume_type): - """Delete the encryption type information for the specified volume type. - - :param volume_type: the volume type whose encryption type information - must be deleted - """ - resp = self.admin_clients("cinder").volume_encryption_types.delete( - volume_type) - if (resp[0].status_code != 202): - raise exceptions.RallyException("EncryptionType Deletion Failed") diff --git a/rally/plugins/openstack/scenarios/cinder/volume_backups.py b/rally/plugins/openstack/scenarios/cinder/volume_backups.py deleted file mode 100644 index 7c4ea6299f..0000000000 --- a/rally/plugins/openstack/scenarios/cinder/volume_backups.py +++ /dev/null @@ -1,61 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.cinder import utils as cinder_utils -from rally.task import validation - - -"""Scenarios for Cinder Volume Backup.""" - - -@validation.add("number", param_name="size", minval=1, integer_only=True) -@validation.add("restricted_parameters", param_names=["name", "display_name"], - subdict="create_volume_kwargs") -@validation.add("restricted_parameters", param_names="name", - subdict="create_backup_kwargs") -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_cinder_services", services="cinder-backup") -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure( - context={"cleanup@openstack": ["cinder"]}, - name="CinderVolumeBackups.create_incremental_volume_backup", - platform="openstack") -class CreateIncrementalVolumeBackup(cinder_utils.CinderBasic): - def run(self, size, do_delete=True, create_volume_kwargs=None, - create_backup_kwargs=None): - """Create a incremental volume backup. - - The scenario first create a volume, the create a backup, the backup - is full backup. Because Incremental backup must be based on the - full backup. finally create a incremental backup. 
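In outline the workload is: create a volume, take a full backup, then take an incremental backup on top of it, and clean up in reverse order so the incremental is removed before the full backup it depends on. A hedged sketch against a hypothetical block-storage client object (not the real BlockStorage service class):

def incremental_backup_cycle(client, size_gb=1, cleanup=True):
    # `client` is assumed to expose create_volume/create_backup/delete_backup/delete_volume.
    volume = client.create_volume(size_gb)
    full_backup = client.create_backup(volume.id)
    incremental = client.create_backup(volume.id, incremental=True)
    if cleanup:
        client.delete_backup(incremental)
        client.delete_backup(full_backup)
        client.delete_volume(volume)
    return full_backup, incremental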
- - :param size: volume size in GB - :param do_delete: deletes backup and volume after creating if True - :param create_volume_kwargs: optional args to create a volume - :param create_backup_kwargs: optional args to create a volume backup - """ - create_volume_kwargs = create_volume_kwargs or {} - create_backup_kwargs = create_backup_kwargs or {} - - volume = self.cinder.create_volume(size, **create_volume_kwargs) - backup1 = self.cinder.create_backup(volume.id, **create_backup_kwargs) - - backup2 = self.cinder.create_backup(volume.id, incremental=True) - - if do_delete: - self.cinder.delete_backup(backup2) - self.cinder.delete_backup(backup1) - self.cinder.delete_volume(volume) diff --git a/rally/plugins/openstack/scenarios/cinder/volume_types.py b/rally/plugins/openstack/scenarios/cinder/volume_types.py deleted file mode 100644 index 991a2b681a..0000000000 --- a/rally/plugins/openstack/scenarios/cinder/volume_types.py +++ /dev/null @@ -1,406 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import logging -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.cinder import utils as cinder_utils -from rally.plugins.openstack.services.storage import cinder_v2 -from rally.task import validation - - -LOG = logging.getLogger(__name__) - - -"""Scenarios for Cinder Volume Type.""" - - -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["cinder"]}, - name="CinderVolumeTypes.create_and_delete_volume_type", - platform="openstack") -class CreateAndDeleteVolumeType(cinder_utils.CinderBasic): - - def run(self, description=None, is_public=True): - """Create and delete a volume Type. - - :param description: Description of the volume type - :param is_public: Volume type visibility - """ - volume_type = self.admin_cinder.create_volume_type( - description=description, - is_public=is_public) - self.admin_cinder.delete_volume_type(volume_type) - - -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["cinder"]}, - name="CinderVolumeTypes.create_and_get_volume_type", - platform="openstack") -class CreateAndGetVolumeType(cinder_utils.CinderBasic): - - def run(self, description=None, is_public=True): - """Create a volume Type, then get the details of the type. 
- - :param description: Description of the volume type - :param is_public: Volume type visibility - """ - volume_type = self.admin_cinder.create_volume_type( - description=description, - is_public=is_public) - self.admin_cinder.get_volume_type(volume_type) - - -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_api_versions", component="cinder", versions=["2"]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["cinder"]}, - name="CinderVolumeTypes.create_and_update_volume_type", - platform="openstack") -class CreateAndUpdateVolumeType(scenario.OpenStackScenario): - - def run(self, description=None, is_public=True, update_name=False, - update_description=None, update_is_public=None): - """create a volume type, then update the type. - - :param description: Description of the volume type - :param is_public: Volume type visibility - :param update_name: if True, can update name by generating random name. - if False, don't update name. - :param update_description: update Description of the volume type - :param update_is_public: update Volume type visibility - """ - service = cinder_v2.CinderV2Service(self._admin_clients, - self.generate_random_name, - atomic_inst=self.atomic_actions()) - - volume_type = service.create_volume_type( - description=description, - is_public=is_public) - - service.update_volume_type( - volume_type, - name=volume_type.name if not update_name else False, - description=update_description, - is_public=update_is_public) - - -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["cinder"]}, - name="CinderVolumeTypes.create_and_list_volume_types", - platform="openstack") -class CreateAndListVolumeTypes(cinder_utils.CinderBasic): - - def run(self, description=None, is_public=True): - """Create a volume Type, then list all types. - - :param description: Description of the volume type - :param is_public: Volume type visibility - """ - volume_type = self.admin_cinder.create_volume_type( - description=description, - is_public=is_public) - - pool_list = self.admin_cinder.list_types() - msg = ("type not included into list of available types" - "created type: {}\n" - "pool of types: {}\n").format(volume_type, pool_list) - self.assertIn(volume_type.id, - [vtype.id for vtype in pool_list], - err_msg=msg) - - -@validation.add("required_params", params=[("create_specs", "provider")]) -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure( - context={"admin_cleanup@openstack": ["cinder"]}, - name="CinderVolumeTypes.create_volume_type_and_encryption_type", - platform="openstack") -class CreateVolumeTypeAndEncryptionType(cinder_utils.CinderBasic): - - def run(self, create_specs=None, provider=None, cipher=None, - key_size=None, control_location="front-end", description=None, - is_public=True): - """Create encryption type - - This scenario first creates a volume type, then creates an encryption - type for the volume type. - - :param create_specs: The encryption type specifications to add. - DEPRECATED, specify arguments explicitly. - :param provider: The class that provides encryption support. For - example, LuksEncryptor. - :param cipher: The encryption algorithm or mode. 
- :param key_size: Size of encryption key, in bits. - :param control_location: Notional service where encryption is - performed. Valid values are "front-end" - or "back-end." - :param description: Description of the volume type - :param is_public: Volume type visibility - """ - volume_type = self.admin_cinder.create_volume_type( - description=description, - is_public=is_public) - if create_specs is None: - specs = { - "provider": provider, - "cipher": cipher, - "key_size": key_size, - "control_location": control_location - } - else: - LOG.warning("The argument `create_spec` is deprecated since" - " Rally 0.10.0. Specify all arguments from it" - " explicitly.") - specs = create_specs - self.admin_cinder.create_encryption_type(volume_type, - specs=specs) - - -@validation.add("required_params", params=[("create_specs", "provider")]) -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure( - context={"admin_cleanup@openstack": ["cinder"]}, - name="CinderVolumeTypes.create_and_list_encryption_type", - platform="openstack") -class CreateAndListEncryptionType(cinder_utils.CinderBasic): - - def run(self, create_specs=None, provider=None, cipher=None, - key_size=None, control_location="front-end", search_opts=None): - """Create and list encryption type - - This scenario firstly creates a volume type, secondly creates an - encryption type for the volume type, thirdly lists all encryption - types. - - :param create_specs: The encryption type specifications to add. - DEPRECATED, specify arguments explicitly. - :param provider: The class that provides encryption support. For - example, LuksEncryptor. - :param cipher: The encryption algorithm or mode. - :param key_size: Size of encryption key, in bits. - :param control_location: Notional service where encryption is - performed. Valid values are "front-end" - or "back-end." - :param search_opts: Options used when search for encryption types - """ - vt_idx = self.context["iteration"] % len(self.context["volume_types"]) - volume_type = self.context["volume_types"][vt_idx] - if create_specs is None: - specs = { - "provider": provider, - "cipher": cipher, - "key_size": key_size, - "control_location": control_location - } - else: - LOG.warning("The argument `create_spec` is deprecated since" - " Rally 0.10.0. Specify all arguments from it" - " explicitly.") - specs = create_specs - self.admin_cinder.create_encryption_type(volume_type["id"], - specs=specs) - self.admin_cinder.list_encryption_type(search_opts) - - -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["cinder"]}, - name="CinderVolumeTypes.create_and_set_volume_type_keys", - platform="openstack") -class CreateAndSetVolumeTypeKeys(cinder_utils.CinderBasic): - - def run(self, volume_type_key, description=None, is_public=True): - """Create and set a volume type's extra specs. 
- - :param volume_type_key: A dict of key/value pairs to be set - :param description: Description of the volume type - :param is_public: Volume type visibility - """ - volume_type = self.admin_cinder.create_volume_type( - description=description, - is_public=is_public) - self.admin_cinder.set_volume_type_keys(volume_type, - metadata=volume_type_key) - - -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_contexts", contexts="volume_types") -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure( - context={"admin_cleanup@openstack": ["cinder"]}, - name="CinderVolumeTypes.create_get_and_delete_encryption_type", - platform="openstack") -class CreateGetAndDeleteEncryptionType(cinder_utils.CinderBasic): - - def run(self, provider=None, cipher=None, - key_size=None, control_location="front-end"): - """Create get and delete an encryption type - - This scenario firstly creates an encryption type for a volome - type created in the context, then gets detailed information of - the created encryption type, finally deletes the created - encryption type. - - :param provider: The class that provides encryption support. For - example, LuksEncryptor. - :param cipher: The encryption algorithm or mode. - :param key_size: Size of encryption key, in bits. - :param control_location: Notional service where encryption is - performed. Valid values are "front-end" - or "back-end." - """ - vt_idx = self.context["iteration"] % len(self.context["volume_types"]) - volume_type = self.context["volume_types"][vt_idx] - specs = { - "provider": provider, - "cipher": cipher, - "key_size": key_size, - "control_location": control_location - } - self.admin_cinder.create_encryption_type(volume_type["id"], - specs=specs) - self.admin_cinder.get_encryption_type(volume_type["id"]) - self.admin_cinder.delete_encryption_type(volume_type["id"]) - - -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_contexts", contexts="volume_types") -@validation.add("required_params", params=[("create_specs", "provider")]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure( - context={"admin_cleanup@openstack": ["cinder"]}, - name="CinderVolumeTypes.create_and_delete_encryption_type", - platform="openstack") -class CreateAndDeleteEncryptionType(cinder_utils.CinderBasic): - - def run(self, create_specs=None, provider=None, cipher=None, - key_size=None, control_location="front-end"): - """Create and delete encryption type - - This scenario firstly creates an encryption type for a given - volume type, then deletes the created encryption type. - - :param create_specs: the encryption type specifications to add - :param provider: The class that provides encryption support. For - example, LuksEncryptor. - :param cipher: The encryption algorithm or mode. - :param key_size: Size of encryption key, in bits. - :param control_location: Notional service where encryption is - performed. Valid values are "front-end" - or "back-end." - """ - vt_idx = self.context["iteration"] % len(self.context["volume_types"]) - volume_type = self.context["volume_types"][vt_idx] - if create_specs is None: - specs = { - "provider": provider, - "cipher": cipher, - "key_size": key_size, - "control_location": control_location - } - else: - LOG.warning("The argument `create_spec` is deprecated since" - " Rally 0.10.0. 
Specify all arguments from it" - " explicitly.") - specs = create_specs - self.admin_cinder.create_encryption_type(volume_type["id"], - specs=specs) - self.admin_cinder.delete_encryption_type(volume_type["id"]) - - -@validation.add("required_services", services=consts.Service.CINDER) -@validation.add("required_contexts", contexts="volume_types") -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["cinder"]}, - name="CinderVolumeTypes.create_and_update_encryption_type", - platform="openstack") -class CreateAndUpdateEncryptionType(cinder_utils.CinderBasic): - - def run(self, create_provider=None, create_cipher=None, - create_key_size=None, create_control_location="front-end", - update_provider=None, update_cipher=None, - update_key_size=None, update_control_location=None): - """Create and update encryption type - - This scenario firstly creates a volume type, secondly creates an - encryption type for the volume type, thirdly updates the encryption - type. - - :param create_provider: The class that provides encryption support. For - example, LuksEncryptor. - :param create_cipher: The encryption algorithm or mode. - :param create_key_size: Size of encryption key, in bits. - :param create_control_location: Notional service where encryption is - performed. Valid values are "front-end" - or "back-end." - :param update_provider: The class that provides encryption support. For - example, LuksEncryptor. - :param update_cipher: The encryption algorithm or mode. - :param update_key_size: Size of encryption key, in bits. - :param update_control_location: Notional service where encryption is - performed. Valid values are "front-end" - or "back-end." - """ - vt_idx = self.context["iteration"] % len(self.context["volume_types"]) - volume_type = self.context["volume_types"][vt_idx] - create_specs = { - "provider": create_provider, - "cipher": create_cipher, - "key_size": create_key_size, - "control_location": create_control_location - } - update_specs = { - "provider": update_provider, - "cipher": update_cipher, - "key_size": update_key_size, - "control_location": update_control_location - } - self.admin_cinder.create_encryption_type(volume_type["id"], - specs=create_specs) - self.admin_cinder.update_encryption_type(volume_type["id"], - specs=update_specs) - - -@validation.add("required_platform", platform="openstack", admin=True) -@validation.add("required_api_versions", component="cinder", versions=["2"]) -@validation.add("required_services", services=consts.Service.CINDER) -@scenario.configure( - context={"admin_cleanup@openstack": ["cinder"]}, - name="CinderVolumeTypes.create_volume_type_add_and_list_type_access", - platform="openstack") -class CreateVolumeTypeAddAndListTypeAccess(scenario.OpenStackScenario): - - def run(self, description=None, is_public=False): - """Add and list volume type access for the given project. - - This scenario first creates a private volume type, then add project - access and list project access to it. 
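The CinderVolumeTypes.create_and_update_encryption_type scenario above takes separate create_* and update_* arguments. A minimal workload sketch (illustrative values; the volume_types context shape is an assumption) might look like:

- scenario:
    CinderVolumeTypes.create_and_update_encryption_type:
      create_provider: "LuksEncryptor"
      create_cipher: "aes-xts-plain64"
      create_key_size: 256
      create_control_location: "front-end"
      update_provider: "LuksEncryptor"
      update_cipher: "aes-xts-plain64"
      update_key_size: 512
      update_control_location: "back-end"
  runner:
    constant:
      times: 2
      concurrency: 2
  contexts:
    users:
      tenants: 1
      users_per_tenant: 1
    volume_types:
      - "test_volume_type"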
- - :param description: Description of the volume type - :param is_public: Volume type visibility - """ - service = cinder_v2.CinderV2Service(self._admin_clients, - self.generate_random_name, - atomic_inst=self.atomic_actions()) - volume_type = service.create_volume_type(description=description, - is_public=is_public) - service.add_type_access(volume_type, - project=self.context["tenant"]["id"]) - service.list_type_access(volume_type) diff --git a/rally/plugins/openstack/scenarios/cinder/volumes.py b/rally/plugins/openstack/scenarios/cinder/volumes.py deleted file mode 100644 index 8ae8fea89b..0000000000 --- a/rally/plugins/openstack/scenarios/cinder/volumes.py +++ /dev/null @@ -1,870 +0,0 @@ -# Copyright 2013 Huawei Technologies Co.,LTD. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import random - -from rally.common import logging -from rally import consts -from rally import exceptions -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.cinder import utils as cinder_utils -from rally.plugins.openstack.scenarios.glance import images -from rally.plugins.openstack.scenarios.nova import utils as nova_utils -from rally.task import atomic -from rally.task import types -from rally.task import validation - -LOG = logging.getLogger(__name__) - -"""Scenarios for Cinder Volumes.""" - - -@types.convert(image={"type": "glance_image"}) -@validation.add("restricted_parameters", param_names=["name", "display_name"]) -@validation.add("image_exists", param_name="image", nullable=True) -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["cinder"]}, - name="CinderVolumes.create_and_list_volume", - platform="openstack") -class CreateAndListVolume(cinder_utils.CinderBasic): - - def run(self, size, detailed=True, image=None, **kwargs): - """Create a volume and list all volumes. - - Measure the "cinder volume-list" command performance. - - If you have only 1 user in your context, you will - add 1 volume on every iteration. So you will have more - and more volumes and will be able to measure the - performance of the "cinder volume-list" command depending on - the number of images owned by users. - - :param size: volume size (integer, in GB) or - dictionary, must contain two values: - min - minimum size volumes will be created as; - max - maximum size volumes will be created as. 
- :param detailed: determines whether the volume listing should contain - detailed information about all of them - :param image: image to be used to create volume - :param kwargs: optional args to create a volume - """ - if image: - kwargs["imageRef"] = image - - self.cinder.create_volume(size, **kwargs) - self.cinder.list_volumes(detailed) - - -@types.convert(image={"type": "glance_image"}) -@validation.add("restricted_parameters", param_names=["name", "display_name"]) -@validation.add("image_exists", param_name="image", nullable=True) -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["cinder"]}, - name="CinderVolumes.create_and_get_volume", - platform="openstack") -class CreateAndGetVolume(cinder_utils.CinderBasic): - - def run(self, size, image=None, **kwargs): - """Create a volume and get the volume. - - Measure the "cinder show" command performance. - - :param size: volume size (integer, in GB) or - dictionary, must contain two values: - min - minimum size volumes will be created as; - max - maximum size volumes will be created as. - :param image: image to be used to create volume - :param kwargs: optional args to create a volume - """ - if image: - kwargs["imageRef"] = image - - volume = self.cinder.create_volume(size, **kwargs) - self.cinder.get_volume(volume.id) - - -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="CinderVolumes.list_volumes", - platform="openstack") -class ListVolumes(cinder_utils.CinderBasic): - - def run(self, detailed=True): - """List all volumes. - - This simple scenario tests the cinder list command by listing - all the volumes. - - :param detailed: True if detailed information about volumes - should be listed - """ - - self.cinder.list_volumes(detailed) - - -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="CinderVolumes.list_types", platform="openstack") -class ListTypes(cinder_utils.CinderBasic): - - def run(self, search_opts=None, is_public=None): - """List all volume types. - - This simple scenario tests the cinder type-list command by listing - all the volume types. - - :param search_opts: Options used when search for volume types - :param is_public: If query public volume type - """ - - self.cinder.list_types(search_opts, is_public=is_public) - - -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="CinderVolumes.list_transfers", platform="openstack") -class ListTransfers(cinder_utils.CinderBasic): - - def run(self, detailed=True, search_opts=None): - """List all transfers. - - This simple scenario tests the "cinder transfer-list" command by - listing all the volume transfers. - - :param detailed: If True, detailed information about volume transfer - should be listed - :param search_opts: Search options to filter out volume transfers. 
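The listing scenarios above (list_volumes, list_types, list_transfers) need no resource arguments beyond their filters. For example, CinderVolumes.list_transfers could be driven by a workload like this (runner and context values are illustrative only):

- scenario:
    CinderVolumes.list_transfers:
      detailed: true
  runner:
    constant:
      times: 2
      concurrency: 2
  contexts:
    users:
      tenants: 1
      users_per_tenant: 1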
- """ - - self.cinder.list_transfers(detailed, search_opts=search_opts) - - -@types.convert(image={"type": "glance_image"}) -@validation.add("restricted_parameters", param_names=["name", "display_name"], - subdict="create_volume_kwargs") -@validation.add("restricted_parameters", param_names=["name", "display_name"], - subdict="update_volume_kwargs") -@validation.add("image_exists", param_name="image", nullable=True) -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["cinder"]}, - name="CinderVolumes.create_and_update_volume", - platform="openstack") -class CreateAndUpdateVolume(cinder_utils.CinderBasic): - - def run(self, size, image=None, create_volume_kwargs=None, - update_volume_kwargs=None): - """Create a volume and update its name and description. - - :param size: volume size (integer, in GB) - :param image: image to be used to create volume - :param create_volume_kwargs: dict, to be used to create volume - :param update_volume_kwargs: dict, to be used to update volume - update_volume_kwargs["update_name"]=True, if updating the - name of volume. - update_volume_kwargs["description"]="desp", if updating the - description of volume. - """ - create_volume_kwargs = create_volume_kwargs or {} - update_volume_kwargs = update_volume_kwargs or {} - if image: - create_volume_kwargs["imageRef"] = image - - if update_volume_kwargs.pop("update_name", False): - update_volume_kwargs["name"] = self.generate_random_name() - - volume = self.cinder.create_volume(size, **create_volume_kwargs) - self.cinder.update_volume(volume, **update_volume_kwargs) - - -@types.convert(image={"type": "glance_image"}) -@validation.add("restricted_parameters", param_names=["name", "display_name"]) -@validation.add("image_exists", param_name="image", nullable=True) -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["cinder"]}, - name="CinderVolumes.create_and_delete_volume", - platform="openstack") -class CreateAndDeleteVolume(cinder_utils.CinderBasic): - - def run(self, size, image=None, min_sleep=0, max_sleep=0, **kwargs): - """Create and then delete a volume. - - Good for testing a maximal bandwidth of cloud. Optional 'min_sleep' - and 'max_sleep' parameters allow the scenario to simulate a pause - between volume creation and deletion (of random duration from - [min_sleep, max_sleep]). - - :param size: volume size (integer, in GB) or - dictionary, must contain two values: - min - minimum size volumes will be created as; - max - maximum size volumes will be created as. 
- :param image: image to be used to create volume - :param min_sleep: minimum sleep time between volume creation and - deletion (in seconds) - :param max_sleep: maximum sleep time between volume creation and - deletion (in seconds) - :param kwargs: optional args to create a volume - """ - if image: - kwargs["imageRef"] = image - - volume = self.cinder.create_volume(size, **kwargs) - self.sleep_between(min_sleep, max_sleep) - self.cinder.delete_volume(volume) - - -@types.convert(image={"type": "glance_image"}) -@validation.add("restricted_parameters", param_names=["name", "display_name"]) -@validation.add("image_exists", param_name="image", nullable=True) -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["cinder"]}, - name="CinderVolumes.create_volume", - platform="openstack") -class CreateVolume(cinder_utils.CinderBasic): - - def run(self, size, image=None, **kwargs): - """Create a volume. - - Good test to check how influence amount of active volumes on - performance of creating new. - - :param size: volume size (integer, in GB) or - dictionary, must contain two values: - min - minimum size volumes will be created as; - max - maximum size volumes will be created as. - :param image: image to be used to create volume - :param kwargs: optional args to create a volume - """ - if image: - kwargs["imageRef"] = image - - self.cinder.create_volume(size, **kwargs) - - -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", users=True) -@validation.add("required_contexts", contexts=("volumes")) -@scenario.configure(context={"cleanup@openstack": ["cinder"]}, - name="CinderVolumes.modify_volume_metadata", - platform="openstack") -class ModifyVolumeMetadata(cinder_utils.CinderBasic): - - def run(self, sets=10, set_size=3, deletes=5, delete_size=3): - """Modify a volume's metadata. - - This requires a volume to be created with the volumes - context. Additionally, ``sets * set_size`` must be greater - than or equal to ``deletes * delete_size``. - - :param sets: how many set_metadata operations to perform - :param set_size: number of metadata keys to set in each - set_metadata operation - :param deletes: how many delete_metadata operations to perform - :param delete_size: number of metadata keys to delete in each - delete_metadata operation - """ - if sets * set_size < deletes * delete_size: - raise exceptions.InvalidArgumentsException( - "Not enough metadata keys will be created: " - "Setting %(num_keys)s keys, but deleting %(num_deletes)s" % - {"num_keys": sets * set_size, - "num_deletes": deletes * delete_size}) - - volume = random.choice(self.context["tenant"]["volumes"]) - keys = self.cinder.set_metadata(volume["id"], sets=sets, - set_size=set_size) - self.cinder.delete_metadata(volume["id"], keys=keys, - deletes=deletes, - delete_size=delete_size) - - -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("restricted_parameters", param_names=["name", "display_name"]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["cinder"]}, - name="CinderVolumes.create_and_extend_volume", - platform="openstack") -class CreateAndExtendVolume(cinder_utils.CinderBasic): - - def run(self, size, new_size, min_sleep=0, max_sleep=0, **kwargs): - """Create and extend a volume and then delete it. 
- - - :param size: volume size (in GB) or - dictionary, must contain two values: - min - minimum size volumes will be created as; - max - maximum size volumes will be created as. - :param new_size: volume new size (in GB) or - dictionary, must contain two values: - min - minimum size volumes will be created as; - max - maximum size volumes will be created as. - to extend. - Notice: should be bigger volume size - :param min_sleep: minimum sleep time between volume extension and - deletion (in seconds) - :param max_sleep: maximum sleep time between volume extension and - deletion (in seconds) - :param kwargs: optional args to extend the volume - """ - volume = self.cinder.create_volume(size, **kwargs) - self.cinder.extend_volume(volume, new_size=new_size) - self.sleep_between(min_sleep, max_sleep) - self.cinder.delete_volume(volume) - - -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("restricted_parameters", param_names=["name", "display_name"]) -@validation.add("required_contexts", contexts=("volumes")) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["cinder"]}, - name="CinderVolumes.create_from_volume_and_delete_volume", - platform="openstack") -class CreateFromVolumeAndDeleteVolume(cinder_utils.CinderBasic): - - def run(self, size, min_sleep=0, max_sleep=0, **kwargs): - """Create volume from volume and then delete it. - - Scenario for testing volume clone.Optional 'min_sleep' and 'max_sleep' - parameters allow the scenario to simulate a pause between volume - creation and deletion (of random duration from [min_sleep, max_sleep]). - - :param size: volume size (in GB), or - dictionary, must contain two values: - min - minimum size volumes will be created as; - max - maximum size volumes will be created as. - Should be equal or bigger source volume size - - :param min_sleep: minimum sleep time between volume creation and - deletion (in seconds) - :param max_sleep: maximum sleep time between volume creation and - deletion (in seconds) - :param kwargs: optional args to create a volume - """ - source_vol = random.choice(self.context["tenant"]["volumes"]) - volume = self.cinder.create_volume(size, source_volid=source_vol["id"], - **kwargs) - self.sleep_between(min_sleep, max_sleep) - self.cinder.delete_volume(volume) - - -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("restricted_parameters", param_names=["name", "display_name"]) -@validation.add("required_contexts", contexts=("volumes")) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["cinder"]}, - name="CinderVolumes.create_and_delete_snapshot", - platform="openstack") -class CreateAndDeleteSnapshot(cinder_utils.CinderBasic): - - def run(self, force=False, min_sleep=0, max_sleep=0, **kwargs): - """Create and then delete a volume-snapshot. - - Optional 'min_sleep' and 'max_sleep' parameters allow the scenario - to simulate a pause between snapshot creation and deletion - (of random duration from [min_sleep, max_sleep]). 
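The CinderVolumes.create_and_extend_volume scenario above bundles create, extend, optional sleep, and delete into a single iteration. A workload sketch (sizes, sleep bounds, and runner values are illustrative) could be:

- scenario:
    CinderVolumes.create_and_extend_volume:
      size: 1
      new_size: 2
      min_sleep: 1
      max_sleep: 2
  runner:
    constant:
      times: 2
      concurrency: 2
  contexts:
    users:
      tenants: 1
      users_per_tenant: 1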
- - :param force: when set to True, allows snapshot of a volume when - the volume is attached to an instance - :param min_sleep: minimum sleep time between snapshot creation and - deletion (in seconds) - :param max_sleep: maximum sleep time between snapshot creation and - deletion (in seconds) - :param kwargs: optional args to create a snapshot - """ - volume = random.choice(self.context["tenant"]["volumes"]) - snapshot = self.cinder.create_snapshot(volume["id"], force=force, - **kwargs) - self.sleep_between(min_sleep, max_sleep) - self.cinder.delete_snapshot(snapshot) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("restricted_parameters", param_names=["name", "display_name"], - subdict="create_volume_params") -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA, - consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["cinder", "nova"]}, - name="CinderVolumes.create_and_attach_volume", - platform="openstack") -class CreateAndAttachVolume(cinder_utils.CinderBasic, - nova_utils.NovaScenario): - - @logging.log_deprecated_args( - "Use 'create_vm_params' for additional instance parameters.", - "0.2.0", ["kwargs"], once=True) - def run(self, size, image, flavor, create_volume_params=None, - create_vm_params=None, **kwargs): - """Create a VM and attach a volume to it. - - Simple test to create a VM and attach a volume, then - detach the volume and delete volume/VM. - - :param size: volume size (integer, in GB) or - dictionary, must contain two values: - min - minimum size volumes will be created as; - max - maximum size volumes will be created as. - :param image: Glance image name to use for the VM - :param flavor: VM flavor name - :param create_volume_params: optional arguments for volume creation - :param create_vm_params: optional arguments for VM creation - :param kwargs: (deprecated) optional arguments for VM creation - """ - - create_volume_params = create_volume_params or {} - - if kwargs and create_vm_params: - raise ValueError("You can not set both 'kwargs'" - "and 'create_vm_params' attributes." 
- "Please use 'create_vm_params'.") - - create_vm_params = create_vm_params or kwargs or {} - - server = self._boot_server(image, flavor, **create_vm_params) - volume = self.cinder.create_volume(size, **create_volume_params) - - self._attach_volume(server, volume) - self._detach_volume(server, volume) - - self.cinder.delete_volume(volume) - self._delete_server(server) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("restricted_parameters", param_names=["name", "display_name"], - subdict="create_vm_params") -@validation.add("restricted_parameters", param_names=["name", "display_name"]) -@validation.add("required_services", services=[consts.Service.NOVA, - consts.Service.CINDER]) -@validation.add("volume_type_exists", param_name="volume_type", nullable=True) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["cinder", "nova"]}, - name="CinderVolumes.create_snapshot_and_attach_volume", - platform="openstack") -class CreateSnapshotAndAttachVolume(cinder_utils.CinderBasic, - nova_utils.NovaScenario): - - def run(self, image, flavor, volume_type=None, size=None, - create_vm_params=None, **kwargs): - """Create vm, volume, snapshot and attach/detach volume. - - :param image: Glance image name to use for the VM - :param flavor: VM flavor name - :param volume_type: Name of volume type to use - :param size: Volume size - dictionary, contains two values: - min - minimum size volumes will be created as; - max - maximum size volumes will be created as. - default values: {"min": 1, "max": 5} - :param create_vm_params: optional arguments for VM creation - :param kwargs: Optional parameters used during volume - snapshot creation. 
- """ - if size is None: - size = {"min": 1, "max": 5} - - volume = self.cinder.create_volume(size, volume_type=volume_type) - snapshot = self.cinder.create_snapshot(volume.id, force=False, - **kwargs) - create_vm_params = create_vm_params or {} - - server = self._boot_server(image, flavor, **create_vm_params) - - self._attach_volume(server, volume) - self._detach_volume(server, volume) - - self.cinder.delete_snapshot(snapshot) - self.cinder.delete_volume(volume) - self._delete_server(server) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA, - consts.Service.CINDER]) -@validation.add("restricted_parameters", param_names=["name", "display_name"], - subdict="create_volume_kwargs") -@validation.add("restricted_parameters", param_names=["name", "display_name"], - subdict="create_snapshot_kwargs") -@validation.add("restricted_parameters", param_names=["name", "display_name"], - subdict="create_vm_params") -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["cinder", "nova"]}, - name="CinderVolumes.create_nested_snapshots" - "_and_attach_volume", - platform="openstack") -class CreateNestedSnapshotsAndAttachVolume(cinder_utils.CinderBasic, - nova_utils.NovaScenario): - - def run(self, image, flavor, size=None, nested_level=1, - create_volume_kwargs=None, create_snapshot_kwargs=None, - create_vm_params=None): - """Create a volume from snapshot and attach/detach the volume - - This scenario create vm, volume, create it's snapshot, attach volume, - then create new volume from existing snapshot and so on, - with defined nested level, after all detach and delete them. - volume->snapshot->volume->snapshot->volume ... - - :param image: Glance image name to use for the VM - :param flavor: VM flavor name - :param size: Volume size - dictionary, contains two values: - min - minimum size volumes will be created as; - max - maximum size volumes will be created as. - default values: {"min": 1, "max": 5} - :param nested_level: amount of nested levels - :param create_volume_kwargs: optional args to create a volume - :param create_snapshot_kwargs: optional args to create a snapshot - :param create_vm_params: optional arguments for VM creation - """ - if size is None: - size = {"min": 1, "max": 5} - - # NOTE: Volume size cannot be smaller than the snapshot size, so - # volume with specified size should be created to avoid - # size mismatching between volume and snapshot due random - # size in _create_volume method. 
- size = random.randint(size["min"], size["max"]) - - create_volume_kwargs = create_volume_kwargs or {} - create_snapshot_kwargs = create_snapshot_kwargs or {} - create_vm_params = create_vm_params or {} - - server = self._boot_server(image, flavor, **create_vm_params) - - source_vol = self.cinder.create_volume(size, **create_volume_kwargs) - snapshot = self.cinder.create_snapshot(source_vol.id, force=False, - **create_snapshot_kwargs) - self._attach_volume(server, source_vol) - - nes_objs = [(server, source_vol, snapshot)] - for i in range(nested_level - 1): - volume = self.cinder.create_volume(size, snapshot_id=snapshot.id) - snapshot = self.cinder.create_snapshot(volume.id, force=False, - **create_snapshot_kwargs) - self._attach_volume(server, volume) - - nes_objs.append((server, volume, snapshot)) - - nes_objs.reverse() - for server, volume, snapshot in nes_objs: - self._detach_volume(server, volume) - self.cinder.delete_snapshot(snapshot) - self.cinder.delete_volume(volume) - self._delete_server(server) - - -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("restricted_parameters", param_names=["name", "display_name"]) -@validation.add("required_contexts", contexts=("volumes")) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["cinder"]}, - name="CinderVolumes.create_and_list_snapshots", - platform="openstack") -class CreateAndListSnapshots(cinder_utils.CinderBasic, - nova_utils.NovaScenario): - - def run(self, force=False, detailed=True, **kwargs): - """Create and then list a volume-snapshot. - - :param force: when set to True, allows snapshot of a volume when - the volume is attached to an instance - :param detailed: True if detailed information about snapshots - should be listed - :param kwargs: optional args to create a snapshot - """ - volume = random.choice(self.context["tenant"]["volumes"]) - self.cinder.create_snapshot(volume["id"], force=force, **kwargs) - self.cinder.list_snapshots(detailed) - - -@types.convert(image={"type": "glance_image"}) -@validation.add("required_services", services=[consts.Service.CINDER, - consts.Service.GLANCE]) -@validation.add("restricted_parameters", param_names=["name", "display_name"]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["cinder", "glance"]}, - name="CinderVolumes.create_and_upload_volume_to_image", - platform="openstack") -class CreateAndUploadVolumeToImage(cinder_utils.CinderBasic, - images.GlanceBasic): - - def run(self, size, image=None, force=False, container_format="bare", - disk_format="raw", do_delete=True, **kwargs): - """Create and upload a volume to image. - - :param size: volume size (integers, in GB), or - dictionary, must contain two values: - min - minimum size volumes will be created as; - max - maximum size volumes will be created as. - :param image: image to be used to create volume. 
- :param force: when set to True volume that is attached to an instance - could be uploaded to image - :param container_format: image container format - :param disk_format: disk format for image - :param do_delete: deletes image and volume after uploading if True - :param kwargs: optional args to create a volume - """ - if image: - kwargs["imageRef"] = image - volume = self.cinder.create_volume(size, **kwargs) - image = self.cinder.upload_volume_to_image( - volume, force=force, container_format=container_format, - disk_format=disk_format) - - if do_delete: - self.cinder.delete_volume(volume) - self.glance.delete_image(image.id) - - -@validation.add("restricted_parameters", param_names=["name", "display_name"], - subdict="create_volume_kwargs") -@validation.add("restricted_parameters", param_names="name", - subdict="create_backup_kwargs") -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_cinder_services", services="cinder-backup") -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["cinder"]}, - name="CinderVolumes.create_volume_backup", - platform="openstack") -class CreateVolumeBackup(cinder_utils.CinderBasic): - - def run(self, size, do_delete=True, create_volume_kwargs=None, - create_backup_kwargs=None): - """Create a volume backup. - - :param size: volume size in GB - :param do_delete: if True, a volume and a volume backup will - be deleted after creation. - :param create_volume_kwargs: optional args to create a volume - :param create_backup_kwargs: optional args to create a volume backup - """ - create_volume_kwargs = create_volume_kwargs or {} - create_backup_kwargs = create_backup_kwargs or {} - - volume = self.cinder.create_volume(size, **create_volume_kwargs) - backup = self.cinder.create_backup(volume.id, **create_backup_kwargs) - - if do_delete: - self.cinder.delete_volume(volume) - self.cinder.delete_backup(backup) - - -@validation.add("restricted_parameters", param_names=["name", "display_name"], - subdict="create_volume_kwargs") -@validation.add("restricted_parameters", param_names="name", - subdict="create_backup_kwargs") -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_cinder_services", services="cinder-backup") -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["cinder"]}, - name="CinderVolumes.create_and_restore_volume_backup", - platform="openstack") -class CreateAndRestoreVolumeBackup(cinder_utils.CinderBasic): - - def run(self, size, do_delete=True, create_volume_kwargs=None, - create_backup_kwargs=None): - """Restore volume backup. - - :param size: volume size in GB - :param do_delete: if True, the volume and the volume backup will - be deleted after creation. 
- :param create_volume_kwargs: optional args to create a volume - :param create_backup_kwargs: optional args to create a volume backup - """ - create_volume_kwargs = create_volume_kwargs or {} - create_backup_kwargs = create_backup_kwargs or {} - - volume = self.cinder.create_volume(size, **create_volume_kwargs) - backup = self.cinder.create_backup(volume.id, **create_backup_kwargs) - self.cinder.restore_backup(backup.id) - - if do_delete: - self.cinder.delete_volume(volume) - self.cinder.delete_backup(backup) - - -@validation.add("restricted_parameters", param_names=["name", "display_name"], - subdict="create_volume_kwargs") -@validation.add("restricted_parameters", param_names="name", - subdict="create_backup_kwargs") -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_cinder_services", services="cinder-backup") -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["cinder"]}, - name="CinderVolumes.create_and_list_volume_backups", - platform="openstack") -class CreateAndListVolumeBackups(cinder_utils.CinderBasic): - - def run(self, size, detailed=True, do_delete=True, - create_volume_kwargs=None, create_backup_kwargs=None): - """Create and then list a volume backup. - - :param size: volume size in GB - :param detailed: True if detailed information about backup - should be listed - :param do_delete: if True, a volume backup will be deleted - :param create_volume_kwargs: optional args to create a volume - :param create_backup_kwargs: optional args to create a volume backup - """ - create_volume_kwargs = create_volume_kwargs or {} - create_backup_kwargs = create_backup_kwargs or {} - - volume = self.cinder.create_volume(size, **create_volume_kwargs) - backup = self.cinder.create_backup(volume.id, **create_backup_kwargs) - self.cinder.list_backups(detailed) - - if do_delete: - self.cinder.delete_volume(volume) - self.cinder.delete_backup(backup) - - -@types.convert(image={"type": "glance_image"}) -@validation.add("restricted_parameters", param_names=["name", "display_name"]) -@validation.add("image_exists", param_name="image", nullable=True) -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["cinder"]}, - name="CinderVolumes.create_volume_and_clone", - platform="openstack") -class CreateVolumeAndClone(cinder_utils.CinderBasic): - - def run(self, size, image=None, nested_level=1, **kwargs): - """Create a volume, then clone it to another volume. - - This creates a volume, then clone it to anothor volume, - and then clone the new volume to next volume... - - 1. create source volume (from image) - 2. clone source volume to volume1 - 3. clone volume1 to volume2 - 4. clone volume2 to volume3 - 5. ... - - :param size: volume size (integer, in GB) or - dictionary, must contain two values: - min - minimum size volumes will be created as; - max - maximum size volumes will be created as. 
- :param image: image to be used to create initial volume - :param nested_level: amount of nested levels - :param kwargs: optional args to create volumes - """ - if image: - kwargs["imageRef"] = image - - source_vol = self.cinder.create_volume(size, **kwargs) - - kwargs.pop("imageRef", None) - for i in range(nested_level): - with atomic.ActionTimer(self, "cinder.clone_volume"): - source_vol = self.cinder.create_volume( - source_vol.size, source_volid=source_vol.id, - **kwargs) - - -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("restricted_parameters", param_names=["name", "display_name"]) -@validation.add("restricted_parameters", param_names=["name", "display_name"], - subdict="create_snapshot_kwargs") -@validation.add("required_contexts", contexts=("volumes")) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["cinder"]}, - name="CinderVolumes.create_volume_from_snapshot", - platform="openstack") -class CreateVolumeFromSnapshot(cinder_utils.CinderBasic): - - def run(self, do_delete=True, create_snapshot_kwargs=None, **kwargs): - """Create a volume-snapshot, then create a volume from this snapshot. - - :param do_delete: if True, a snapshot and a volume will - be deleted after creation. - :param create_snapshot_kwargs: optional args to create a snapshot - :param kwargs: optional args to create a volume - """ - create_snapshot_kwargs = create_snapshot_kwargs or {} - src_volume = random.choice(self.context["tenant"]["volumes"]) - - snapshot = self.cinder.create_snapshot(src_volume["id"], - **create_snapshot_kwargs) - volume = self.cinder.create_volume(src_volume["size"], - snapshot_id=snapshot.id, - **kwargs) - - if do_delete: - self.cinder.delete_snapshot(snapshot) - self.cinder.delete_volume(volume) - - -@types.convert(image={"type": "glance_image"}) -@validation.add("restricted_parameters", param_names=["name", "display_name"]) -@validation.add("image_exists", param_name="image", nullable=True) -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["cinder"]}, - name="CinderVolumes.create_volume_" - "and_update_readonly_flag", - platform="openstack") -class CreateVolumeAndUpdateReadonlyFlag(cinder_utils.CinderBasic): - - def run(self, size, image=None, read_only=True, **kwargs): - """Create a volume and then update its readonly flag. 
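The CinderVolumes.create_volume_and_clone scenario above chains clones to the requested depth, so nested_level controls how many clone operations each iteration performs. A workload sketch (illustrative values):

- scenario:
    CinderVolumes.create_volume_and_clone:
      size: 1
      nested_level: 3
  runner:
    constant:
      times: 2
      concurrency: 2
  contexts:
    users:
      tenants: 1
      users_per_tenant: 1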
- - :param size: volume size (integer, in GB) - :param image: image to be used to create volume - :param read_only: The value to indicate whether to update volume to - read-only access mode - :param kwargs: optional args to create a volume - """ - if image: - kwargs["imageRef"] = image - volume = self.cinder.create_volume(size, **kwargs) - self.cinder.update_readonly_flag(volume.id, read_only=read_only) - - -@types.convert(image={"type": "glance_image"}) -@validation.add("restricted_parameters", param_names=["name", "display_name"]) -@validation.add("image_exists", param_name="image", nullable=True) -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["cinder"]}, - name="CinderVolumes.create_and_accept_transfer", - platform="openstack") -class CreateAndAcceptTransfer(cinder_utils.CinderBasic): - - def run(self, size, image=None, **kwargs): - """Create a volume transfer, then accept it - - Measure the "cinder transfer-create" and "cinder transfer-accept" - command performace. - :param size: volume size (integer, in GB) - :param image: image to be used to create initial volume - :param kwargs: optional args to create a volume - """ - if image: - kwargs["imageRef"] = image - volume = self.cinder.create_volume(size, **kwargs) - transfer = self.cinder.transfer_create(volume.id) - self.cinder.transfer_accept(transfer.id, auth_key=transfer.auth_key) diff --git a/rally/plugins/openstack/scenarios/designate/__init__.py b/rally/plugins/openstack/scenarios/designate/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/scenarios/designate/basic.py b/rally/plugins/openstack/scenarios/designate/basic.py deleted file mode 100644 index c7c1da44e8..0000000000 --- a/rally/plugins/openstack/scenarios/designate/basic.py +++ /dev/null @@ -1,377 +0,0 @@ -# Copyright 2014 Hewlett-Packard Development Company, L.P. -# -# Author: Endre Karlson -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import random - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.designate import utils -from rally.task import validation - - -"""Basic scenarios for Designate.""" - - -@validation.add("required_services", - services=[consts.Service.DESIGNATE]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["designate"]}, - name="DesignateBasic.create_and_list_domains", - platform="openstack") -class CreateAndListDomains(utils.DesignateScenario): - - def run(self): - """Create a domain and list all domains. - - Measure the "designate domain-list" command performance. - - If you have only 1 user in your context, you will - add 1 domain on every iteration. 
So you will have more - and more domain and will be able to measure the - performance of the "designate domain-list" command depending on - the number of domains owned by users. - """ - domain = self._create_domain() - msg = "Domain isn't created" - self.assertTrue(domain, msg) - list_domains = self._list_domains() - self.assertIn(domain, list_domains) - - -@validation.add("required_services", - services=[consts.Service.DESIGNATE]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="DesignateBasic.list_domains", - platform="openstack") -class ListDomains(utils.DesignateScenario): - - def run(self): - """List Designate domains. - - This simple scenario tests the designate domain-list command by listing - all the domains. - - Suppose if we have 2 users in context and each has 2 domains - uploaded for them we will be able to test the performance of - designate domain-list command in this case. - """ - self._list_domains() - - -@validation.add("required_services", - services=[consts.Service.DESIGNATE]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["designate"]}, - name="DesignateBasic.create_and_delete_domain", - platform="openstack") -class CreateAndDeleteDomain(utils.DesignateScenario): - - def run(self): - """Create and then delete a domain. - - Measure the performance of creating and deleting domains - with different level of load. - """ - domain = self._create_domain() - self._delete_domain(domain["id"]) - - -@validation.add("required_services", - services=[consts.Service.DESIGNATE]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["designate"]}, - name="DesignateBasic.create_and_update_domain", - platform="openstack") -class CreateAndUpdateDomain(utils.DesignateScenario): - - def run(self): - """Create and then update a domain. - - Measure the performance of creating and updating domains - with different level of load. - """ - domain = self._create_domain() - self._update_domain(domain) - - -@validation.add("required_services", - services=[consts.Service.DESIGNATE]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["designate"]}, - name="DesignateBasic.create_and_delete_records", - platform="openstack") -class CreateAndDeleteRecords(utils.DesignateScenario): - - def run(self, records_per_domain=5): - """Create and then delete records. - - Measure the performance of creating and deleting records - with different level of load. - - :param records_per_domain: Records to create pr domain. - """ - domain = self._create_domain() - - records = [] - - for i in range(records_per_domain): - record = self._create_record(domain) - records.append(record) - - for record in records: - self._delete_record( - domain["id"], record["id"]) - - -@validation.add("required_services", - services=[consts.Service.DESIGNATE]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="DesignateBasic.list_records", - platform="openstack") -class ListRecords(utils.DesignateScenario): - - def run(self, domain_id): - """List Designate records. - - This simple scenario tests the designate record-list command by listing - all the records in a domain. 
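A workload for the DesignateBasic.create_and_delete_records scenario above only needs the per-domain record count; the runner and context values below are illustrative:

- scenario:
    DesignateBasic.create_and_delete_records:
      records_per_domain: 5
  runner:
    constant:
      times: 2
      concurrency: 2
  contexts:
    users:
      tenants: 1
      users_per_tenant: 1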
- - Suppose if we have 2 users in context and each has 2 domains - uploaded for them we will be able to test the performance of - designate record-list command in this case. - - :param domain_id: Domain ID - """ - - self._list_records(domain_id) - - -@validation.add("required_services", - services=[consts.Service.DESIGNATE]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["designate"]}, - name="DesignateBasic.create_and_list_records", - platform="openstack") -class CreateAndListRecords(utils.DesignateScenario): - - def run(self, records_per_domain=5): - """Create and then list records. - - If you have only 1 user in your context, you will - add 1 record on every iteration. So you will have more - and more records and will be able to measure the - performance of the "designate record-list" command depending on - the number of domains/records owned by users. - - :param records_per_domain: Records to create pr domain. - """ - domain = self._create_domain() - for i in range(records_per_domain): - self._create_record(domain) - - self._list_records(domain["id"]) - - -@validation.add("required_services", - services=[consts.Service.DESIGNATE]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["designate"]}, - name="DesignateBasic.create_and_list_servers", - platform="openstack") -class CreateAndListServers(utils.DesignateScenario): - - def run(self): - """Create a Designate server and list all servers. - - If you have only 1 user in your context, you will - add 1 server on every iteration. So you will have more - and more server and will be able to measure the - performance of the "designate server-list" command depending on - the number of servers owned by users. - """ - server = self._create_server() - self.assertTrue(server) - list_servers = self._list_servers() - self.assertIn(server, list_servers) - - -@validation.add("required_services", - services=[consts.Service.DESIGNATE]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["designate"]}, - name="DesignateBasic.create_and_delete_server", - platform="openstack") -class CreateAndDeleteServer(utils.DesignateScenario): - - def run(self): - """Create and then delete a server. - - Measure the performance of creating and deleting servers - with different level of load. - """ - server = self._create_server() - self._delete_server(server["id"]) - - -@validation.add("required_services", - services=[consts.Service.DESIGNATE]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(name="DesignateBasic.list_servers", platform="openstack") -class ListServers(utils.DesignateScenario): - - def run(self): - """List Designate servers. - - This simple scenario tests the designate server-list command by listing - all the servers. - """ - self._list_servers() - - -# NOTE: API V2 -@validation.add("required_services", - services=[consts.Service.DESIGNATE]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["designate"]}, - name="DesignateBasic.create_and_list_zones", - platform="openstack") -class CreateAndListZones(utils.DesignateScenario): - - def run(self): - """Create a zone and list all zones. - - Measure the "openstack zone list" command performance. 
- - If you have only 1 user in your context, you will - add 1 zone on every iteration. So you will have more - and more zone and will be able to measure the - performance of the "openstack zone list" command depending on - the number of zones owned by users. - """ - zone = self._create_zone() - self.assertTrue(zone) - list_zones = self._list_zones() - self.assertIn(zone, list_zones) - - -@validation.add("required_services", - services=[consts.Service.DESIGNATE]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="DesignateBasic.list_zones", platform="openstack") -class ListZones(utils.DesignateScenario): - - def run(self): - """List Designate zones. - - This simple scenario tests the openstack zone list command by listing - all the zones. - """ - - self._list_zones() - - -@validation.add("required_services", - services=[consts.Service.DESIGNATE]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["designate"]}, - name="DesignateBasic.create_and_delete_zone", - platform="openstack") -class CreateAndDeleteZone(utils.DesignateScenario): - - def run(self): - """Create and then delete a zone. - - Measure the performance of creating and deleting zones - with different level of load. - """ - zone = self._create_zone() - self._delete_zone(zone["id"]) - - -@validation.add("required_services", - services=[consts.Service.DESIGNATE]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="DesignateBasic.list_recordsets", - platform="openstack") -class ListRecordsets(utils.DesignateScenario): - - def run(self, zone_id): - """List Designate recordsets. - - This simple scenario tests the openstack recordset list command by - listing all the recordsets in a zone. - - :param zone_id: Zone ID - """ - - self._list_recordsets(zone_id) - - -@validation.add("required_services", - services=[consts.Service.DESIGNATE]) -@validation.add("required_platform", platform="openstack", users=True) -@validation.add("required_contexts", contexts=("zones")) -@scenario.configure(context={"cleanup@openstack": ["designate"]}, - name="DesignateBasic.create_and_delete_recordsets", - platform="openstack") -class CreateAndDeleteRecordsets(utils.DesignateScenario): - - def run(self, recordsets_per_zone=5): - """Create and then delete recordsets. - - Measure the performance of creating and deleting recordsets - with different level of load. - - :param recordsets_per_zone: recordsets to create pr zone. - """ - zone = random.choice(self.context["tenant"]["zones"]) - - recordsets = [] - - for i in range(recordsets_per_zone): - recordset = self._create_recordset(zone) - recordsets.append(recordset) - - for recordset in recordsets: - self._delete_recordset( - zone["id"], recordset["id"]) - - -@validation.add("required_services", - services=[consts.Service.DESIGNATE]) -@validation.add("required_platform", platform="openstack", users=True) -@validation.add("required_contexts", contexts=("zones")) -@scenario.configure(context={"cleanup@openstack": ["designate"]}, - name="DesignateBasic.create_and_list_recordsets", - platform="openstack") -class CreateAndListRecordsets(utils.DesignateScenario): - - def run(self, recordsets_per_zone=5): - """Create and then list recordsets. - - If you have only 1 user in your context, you will - add 1 recordset on every iteration. 
So you will have more - and more recordsets and will be able to measure the - performance of the "openstack recordset list" command depending on - the number of zones/recordsets owned by users. - - :param recordsets_per_zone: recordsets to create pr zone. - """ - zone = random.choice(self.context["tenant"]["zones"]) - - for i in range(recordsets_per_zone): - self._create_recordset(zone) - - self._list_recordsets(zone["id"]) diff --git a/rally/plugins/openstack/scenarios/designate/utils.py b/rally/plugins/openstack/scenarios/designate/utils.py deleted file mode 100644 index 8038030081..0000000000 --- a/rally/plugins/openstack/scenarios/designate/utils.py +++ /dev/null @@ -1,209 +0,0 @@ -# Copyright 2014 Hewlett-Packard Development Company, L.P. -# -# Author: Endre Karlson -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.plugins.openstack import scenario -from rally.task import atomic - - -class DesignateScenario(scenario.OpenStackScenario): - """Base class for Designate scenarios with basic atomic actions.""" - - @atomic.action_timer("designate.create_domain") - def _create_domain(self, domain=None): - """Create domain. - - :param domain: dict, POST /v1/domains request options - :returns: designate domain dict - """ - domain = domain or {} - - domain.setdefault("email", "root@random.name") - domain["name"] = "%s.name." % self.generate_random_name() - return self.clients("designate").domains.create(domain) - - @atomic.action_timer("designate.list_domains") - def _list_domains(self): - """Return user domain list.""" - return self.clients("designate").domains.list() - - @atomic.action_timer("designate.delete_domain") - def _delete_domain(self, domain_id): - """Delete designate zone. - - :param domain_id: domain ID - """ - self.clients("designate").domains.delete(domain_id) - - @atomic.action_timer("designate.update_domain") - def _update_domain(self, domain): - """Update designate domain. - - :param domain: designate domain - :returns: designate updated domain dict - """ - domain["description"] = "updated domain" - domain["email"] = "updated@random.name" - return self.clients("designate").domains.update(domain) - - @atomic.action_timer("designate.create_record") - def _create_record(self, domain, record=None): - """Create a record in a domain. - - :param domain: domain dict - :param record: record dict - :returns: Designate record dict - """ - record = record or {} - record.setdefault("type", "A") - record["name"] = "%s.%s" % (self.generate_random_name(), - domain["name"]) - record.setdefault("data", "10.0.0.1") - - return self.clients("designate").records.create(domain["id"], record) - - @atomic.action_timer("designate.list_records") - def _list_records(self, domain_id): - """List domain records. - - :param domain_id: domain ID - :returns: domain records list - """ - return self.clients("designate").records.list(domain_id) - - @atomic.action_timer("designate.delete_record") - def _delete_record(self, domain_id, record_id): - """Delete a domain record. 
- - :param domain_id: domain ID - :param record_id: record ID - """ - self.clients("designate").records.delete(domain_id, record_id) - - @atomic.action_timer("designate.create_server") - def _create_server(self, server=None): - """Create server. - - :param server: dict, POST /v1/servers request options - :returns: designate server dict - """ - server = server or {} - - server["name"] = "%s.name." % self.generate_random_name() - return self.admin_clients("designate").servers.create(server) - - @atomic.action_timer("designate.list_servers") - def _list_servers(self): - """Return user server list.""" - return self.admin_clients("designate").servers.list() - - @atomic.action_timer("designate.delete_server") - def _delete_server(self, server_id): - """Delete Server. - - :param server_id: unicode server ID - """ - self.admin_clients("designate").servers.delete(server_id) - - # NOTE: API V2 - @atomic.action_timer("designate.create_zone") - def _create_zone(self, name=None, type_=None, email=None, description=None, - ttl=None): - """Create zone. - - :param name: Zone name - :param type_: Zone type, PRIMARY or SECONDARY - :param email: Zone owner email - :param description: Zone description - :param ttl: Zone ttl - Time to live in seconds - :returns: designate zone dict - """ - type_ = type_ or "PRIMARY" - - if type_ == "PRIMARY": - email = email or "root@random.name" - # Name is only useful to be random for PRIMARY - name = name or "%s.name." % self.generate_random_name() - - return self.clients("designate", version="2").zones.create( - name=name, - type_=type_, - email=email, - description=description, - ttl=ttl - ) - - @atomic.action_timer("designate.list_zones") - def _list_zones(self, criterion=None, marker=None, limit=None): - """Return user zone list. - - :param criterion: API Criterion to filter by - :param marker: UUID marker of the item to start the page from - :param limit: How many items to return in the page. - :returns: list of designate zones - """ - return self.clients("designate", version="2").zones.list() - - @atomic.action_timer("designate.delete_zone") - def _delete_zone(self, zone_id): - """Delete designate zone. - - :param zone_id: Zone ID - """ - self.clients("designate", version="2").zones.delete(zone_id) - - @atomic.action_timer("designate.list_recordsets") - def _list_recordsets(self, zone_id, criterion=None, marker=None, - limit=None): - """List zone recordsets. - - :param zone_id: Zone ID - :param criterion: API Criterion to filter by - :param marker: UUID marker of the item to start the page from - :param limit: How many items to return in the page. - :returns: zone recordsets list - """ - return self.clients("designate", version="2").recordsets.list( - zone_id, criterion=criterion, marker=marker, limit=limit) - - @atomic.action_timer("designate.create_recordset") - def _create_recordset(self, zone, recordset=None): - """Create a recordset in a zone. - - :param zone: zone dict - :param recordset: recordset dict - :returns: Designate recordset dict - """ - recordset = recordset or {} - recordset.setdefault("type_", recordset.pop("type", "A")) - if "name" not in recordset: - recordset["name"] = "%s.%s" % (self.generate_random_name(), - zone["name"]) - if "records" not in recordset: - recordset["records"] = ["10.0.0.1"] - - return self.clients("designate", version="2").recordsets.create( - zone["id"], **recordset) - - @atomic.action_timer("designate.delete_recordset") - def _delete_recordset(self, zone_id, recordset_id): - """Delete a zone recordset. 
- - :param zone_id: Zone ID - :param recordset_id: Recordset ID - """ - - self.clients("designate", version="2").recordsets.delete( - zone_id, recordset_id) diff --git a/rally/plugins/openstack/scenarios/dummy.py b/rally/plugins/openstack/scenarios/dummy.py deleted file mode 100644 index 7bd0793e33..0000000000 --- a/rally/plugins/openstack/scenarios/dummy.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2017: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.plugins.common.scenarios.dummy import dummy -from rally.plugins.openstack import scenario - - -@scenario.configure(name="Dummy.openstack") -class DummyOpenStack(dummy.Dummy, scenario.OpenStackScenario): - """Clone of Dummy.dummy for OpenStack""" diff --git a/rally/plugins/openstack/scenarios/ec2/__init__.py b/rally/plugins/openstack/scenarios/ec2/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/scenarios/ec2/servers.py b/rally/plugins/openstack/scenarios/ec2/servers.py deleted file mode 100644 index 8587214d1c..0000000000 --- a/rally/plugins/openstack/scenarios/ec2/servers.py +++ /dev/null @@ -1,59 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.ec2 import utils -from rally.task import types -from rally.task import validation - - -"""Scenarios for servers using EC2.""" - - -@validation.add("required_services", services=[consts.Service.EC2]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["ec2"]}, - name="EC2Servers.list_servers", platform="openstack") -class ListServers(utils.EC2Scenario): - - def run(self): - """List all servers. - - This simple scenario tests the EC2 API list function by listing - all the servers. - """ - self._list_servers() - - -@types.convert(image={"type": "ec2_image"}, - flavor={"type": "ec2_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.EC2]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["ec2"]}, - name="EC2Servers.boot_server", platform="openstack") -class BootServer(utils.EC2Scenario): - - def run(self, image, flavor, **kwargs): - """Boot a server. - - Assumes that cleanup is done elsewhere. 
- - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param kwargs: optional additional arguments for server creation - """ - self._boot_servers(image, flavor, **kwargs) diff --git a/rally/plugins/openstack/scenarios/ec2/utils.py b/rally/plugins/openstack/scenarios/ec2/utils.py deleted file mode 100644 index 70d60da334..0000000000 --- a/rally/plugins/openstack/scenarios/ec2/utils.py +++ /dev/null @@ -1,68 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from rally.common import cfg -from rally.plugins.openstack import scenario -from rally.task import atomic -from rally.task import utils - - -CONF = cfg.CONF - - -class EC2Scenario(scenario.OpenStackScenario): - """Base class for EC2 scenarios with basic atomic actions.""" - - @atomic.action_timer("ec2.list_servers") - def _list_servers(self): - """Returns user servers list.""" - return self.clients("ec2").get_only_instances() - - @atomic.action_timer("ec2.boot_servers") - def _boot_servers(self, image_id, flavor_name, - instance_num=1, **kwargs): - """Boot multiple servers. - - Returns when all the servers are actually booted and are in the - "Running" state. - - :param image_id: ID of the image to be used for server creation - :param flavor_name: Name of the flavor to be used for server creation - :param instance_num: Number of instances to boot - :param kwargs: Other optional parameters to boot servers - - :returns: List of created server objects - """ - reservation = self.clients("ec2").run_instances( - image_id=image_id, - instance_type=flavor_name, - min_count=instance_num, - max_count=instance_num, - **kwargs) - servers = [instance for instance in reservation.instances] - - self.sleep_between(CONF.openstack.ec2_server_boot_prepoll_delay) - servers = [utils.wait_for_status( - server, - ready_statuses=["RUNNING"], - update_resource=self._update_resource, - timeout=CONF.openstack.ec2_server_boot_timeout, - check_interval=CONF.openstack.ec2_server_boot_poll_interval - ) for server in servers] - return servers - - def _update_resource(self, resource): - resource.update() - return resource diff --git a/rally/plugins/openstack/scenarios/glance/__init__.py b/rally/plugins/openstack/scenarios/glance/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/scenarios/glance/images.py b/rally/plugins/openstack/scenarios/glance/images.py deleted file mode 100644 index 7744da4013..0000000000 --- a/rally/plugins/openstack/scenarios/glance/images.py +++ /dev/null @@ -1,377 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import logging -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.nova import utils as nova_utils -from rally.plugins.openstack.services.image import glance_v2 -from rally.plugins.openstack.services.image import image -from rally.task import types -from rally.task import validation - -LOG = logging.getLogger(__name__) - -"""Scenarios for Glance images.""" - - -class GlanceBasic(scenario.OpenStackScenario): - def __init__(self, context=None, admin_clients=None, clients=None): - super(GlanceBasic, self).__init__(context, admin_clients, clients) - if hasattr(self, "_admin_clients"): - self.admin_glance = image.Image( - self._admin_clients, name_generator=self.generate_random_name, - atomic_inst=self.atomic_actions()) - if hasattr(self, "_clients"): - self.glance = image.Image( - self._clients, name_generator=self.generate_random_name, - atomic_inst=self.atomic_actions()) - - -@validation.add("enum", param_name="container_format", - values=["ami", "ari", "aki", "bare", "ovf"]) -@validation.add("enum", param_name="disk_format", - values=["ami", "ari", "aki", "vhd", "vmdk", "raw", - "qcow2", "vdi", "iso"]) -@types.convert(image_location={"type": "path_or_url"}, - kwargs={"type": "glance_image_args"}) -@validation.add("required_services", services=[consts.Service.GLANCE]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["glance"]}, - name="GlanceImages.create_and_list_image", - platform="openstack") -class CreateAndListImage(GlanceBasic): - - def run(self, container_format, image_location, disk_format, - visibility="private", min_disk=0, min_ram=0, properties=None): - """Create an image and then list all images. - - Measure the "glance image-list" command performance. - - If you have only 1 user in your context, you will - add 1 image on every iteration. So you will have more - and more images and will be able to measure the - performance of the "glance image-list" command depending on - the number of images owned by users. - - :param container_format: container format of image. Acceptable - formats: ami, ari, aki, bare, and ovf - :param image_location: image file location - :param disk_format: disk format of image. 
Acceptable formats: - ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso - :param visibility: The access permission for the created image - :param min_disk: The min disk of created images - :param min_ram: The min ram of created images - :param properties: A dict of image metadata properties to set - on the image - """ - image = self.glance.create_image( - container_format=container_format, - image_location=image_location, - disk_format=disk_format, - visibility=visibility, - min_disk=min_disk, - min_ram=min_ram, - properties=properties) - self.assertTrue(image) - image_list = self.glance.list_images() - self.assertIn(image.id, [i.id for i in image_list]) - - -@validation.add("enum", param_name="container_format", - values=["ami", "ari", "aki", "bare", "ovf"]) -@validation.add("enum", param_name="disk_format", - values=["ami", "ari", "aki", "vhd", "vmdk", "raw", - "qcow2", "vdi", "iso"]) -@types.convert(image_location={"type": "path_or_url"}, - kwargs={"type": "glance_image_args"}) -@validation.add("required_services", services=[consts.Service.GLANCE]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["glance"]}, - name="GlanceImages.create_and_get_image", - platform="openstack") -class CreateAndGetImage(GlanceBasic): - - def run(self, container_format, image_location, disk_format, - visibility="private", min_disk=0, min_ram=0, properties=None): - """Create and get detailed information of an image. - - :param container_format: container format of image. Acceptable - formats: ami, ari, aki, bare, and ovf - :param image_location: image file location - :param disk_format: disk format of image. Acceptable formats: - ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso - :param visibility: The access permission for the created image - :param min_disk: The min disk of created images - :param min_ram: The min ram of created images - :param properties: A dict of image metadata properties to set - on the image - """ - image = self.glance.create_image( - container_format=container_format, - image_location=image_location, - disk_format=disk_format, - visibility=visibility, - min_disk=min_disk, - min_ram=min_ram, - properties=properties) - self.assertTrue(image) - image_info = self.glance.get_image(image) - self.assertEqual(image.id, image_info.id) - - -@validation.add("required_services", services=[consts.Service.GLANCE]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="GlanceImages.list_images", - platform="openstack") -class ListImages(GlanceBasic): - - def run(self): - """List all images. - - This simple scenario tests the glance image-list command by listing - all the images. - - Suppose if we have 2 users in context and each has 2 images - uploaded for them we will be able to test the performance of - glance image-list command in this case. 
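# CreateAndListImage and CreateAndGetImage above share one shape: create an
# image through the thin Image service wrapper, then run the read operation
# under test and assert that the created image shows up in the result. A
# self-contained sketch of that create/verify shape with an in-memory fake
# service (illustrative only; the real scenarios call
# self.glance.create_image() / list_images() and use self.assertIn):
import uuid


class FakeImage:
    def __init__(self):
        self.id = uuid.uuid4().hex


class FakeImageService:
    """In-memory stand-in for the Glance service wrapper."""

    def __init__(self):
        self._images = []

    def create_image(self, **kwargs):
        image = FakeImage()
        self._images.append(image)
        return image

    def list_images(self):
        return list(self._images)


service = FakeImageService()
image = service.create_image(container_format="bare", disk_format="qcow2")
assert image.id in [i.id for i in service.list_images()]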
- """ - self.glance.list_images() - - -@validation.add("enum", param_name="container_format", - values=["ami", "ari", "aki", "bare", "ovf"]) -@validation.add("enum", param_name="disk_format", - values=["ami", "ari", "aki", "vhd", "vmdk", "raw", - "qcow2", "vdi", "iso"]) -@types.convert(image_location={"type": "path_or_url"}, - kwargs={"type": "glance_image_args"}) -@validation.add("required_services", services=[consts.Service.GLANCE]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["glance"]}, - name="GlanceImages.create_and_delete_image", - platform="openstack") -class CreateAndDeleteImage(GlanceBasic): - - def run(self, container_format, image_location, disk_format, - visibility="private", min_disk=0, min_ram=0, properties=None): - """Create and then delete an image. - - :param container_format: container format of image. Acceptable - formats: ami, ari, aki, bare, and ovf - :param image_location: image file location - :param disk_format: disk format of image. Acceptable formats: - ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso - :param visibility: The access permission for the created image - :param min_disk: The min disk of created images - :param min_ram: The min ram of created images - :param properties: A dict of image metadata properties to set - on the image - """ - image = self.glance.create_image( - container_format=container_format, - image_location=image_location, - disk_format=disk_format, - visibility=visibility, - min_disk=min_disk, - min_ram=min_ram, - properties=properties) - self.glance.delete_image(image.id) - - -@types.convert(flavor={"type": "nova_flavor"}, - image_location={"type": "path_or_url"}, - kwargs={"type": "glance_image_args"}) -@validation.add("enum", param_name="container_format", - values=["ami", "ari", "aki", "bare", "ovf"]) -@validation.add("enum", param_name="disk_format", - values=["ami", "ari", "aki", "vhd", "vmdk", "raw", - "qcow2", "vdi", "iso"]) -@validation.add("restricted_parameters", param_names=["image_name", "name"]) -@validation.add("flavor_exists", param_name="flavor") -@validation.add("required_services", services=[consts.Service.GLANCE, - consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["glance", "nova"]}, - name="GlanceImages.create_image_and_boot_instances", - platform="openstack") -class CreateImageAndBootInstances(GlanceBasic, nova_utils.NovaScenario): - - def run(self, container_format, image_location, disk_format, - flavor, number_instances, visibility="private", min_disk=0, - min_ram=0, properties=None, boot_server_kwargs=None, **kwargs): - """Create an image and boot several instances from it. - - :param container_format: container format of image. Acceptable - formats: ami, ari, aki, bare, and ovf - :param image_location: image file location - :param disk_format: disk format of image. 
Acceptable formats: - ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso - :param visibility: The access permission for the created image - :param min_disk: The min disk of created images - :param min_ram: The min ram of created images - :param properties: A dict of image metadata properties to set - on the image - :param flavor: Nova flavor to be used to launch an instance - :param number_instances: number of Nova servers to boot - :param boot_server_kwargs: optional parameters to boot server - :param kwargs: optional parameters to create server (deprecated) - """ - boot_server_kwargs = boot_server_kwargs or kwargs or {} - - if kwargs: - LOG.warning("'kwargs' is deprecated in Rally v0.8.0: Use " - "'boot_server_kwargs' for additional parameters when " - "booting servers.") - - image = self.glance.create_image( - container_format=container_format, - image_location=image_location, - disk_format=disk_format, - visibility=visibility, - min_disk=min_disk, - min_ram=min_ram, - properties=properties) - - self._boot_servers(image.id, flavor, number_instances, - **boot_server_kwargs) - - -@validation.add("enum", param_name="container_format", - values=["ami", "ari", "aki", "bare", "ovf"]) -@validation.add("enum", param_name="disk_format", - values=["ami", "ari", "aki", "vhd", "vmdk", "raw", - "qcow2", "vdi", "iso"]) -@types.convert(image_location={"type": "path_or_url"}, - kwargs={"type": "glance_image_args"}) -@validation.add("required_services", services=[consts.Service.GLANCE]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["glance"]}, - name="GlanceImages.create_and_update_image", - platform="openstack") -class CreateAndUpdateImage(GlanceBasic): - - def run(self, container_format, image_location, disk_format, - remove_props=None, visibility="private", create_min_disk=0, - create_min_ram=0, create_properties=None, - update_min_disk=0, update_min_ram=0): - """Create an image then update it. - - Measure the "glance image-create" and "glance image-update" commands - performance. - - :param container_format: container format of image. Acceptable - formats: ami, ari, aki, bare, and ovf - :param image_location: image file location - :param disk_format: disk format of image. Acceptable formats: - ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso - :param remove_props: List of property names to remove. - (It is only supported by Glance v2.) 
- :param visibility: The access permission for the created image - :param create_min_disk: The min disk of created images - :param create_min_ram: The min ram of created images - :param create_properties: A dict of image metadata properties to set - on the created image - :param update_min_disk: The min disk of updated images - :param update_min_ram: The min ram of updated images - """ - image = self.glance.create_image( - container_format=container_format, - image_location=image_location, - disk_format=disk_format, - visibility=visibility, - min_disk=create_min_disk, - min_ram=create_min_ram, - properties=create_properties) - - self.glance.update_image(image.id, - min_disk=update_min_disk, - min_ram=update_min_ram, - remove_props=remove_props) - - -@validation.add("required_services", services=(consts.Service.GLANCE, )) -@validation.add("required_platform", platform="openstack", users=True) -@validation.add("required_api_versions", component="glance", versions=["2"]) -@scenario.configure(context={"cleanup@openstack": ["glance"]}, - name="GlanceImages.create_and_deactivate_image", - platform="openstack") -class CreateAndDeactivateImage(GlanceBasic): - def run(self, container_format, image_location, disk_format, - visibility="private", min_disk=0, min_ram=0): - """Create an image, then deactivate it. - - :param container_format: container format of image. Acceptable - formats: ami, ari, aki, bare, and ovf - :param image_location: image file location - :param disk_format: disk format of image. Acceptable formats: - ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso - :param visibility: The access permission for the created image - :param min_disk: The min disk of created images - :param min_ram: The min ram of created images - """ - service = glance_v2.GlanceV2Service(self._clients, - self.generate_random_name, - atomic_inst=self.atomic_actions()) - - image = service.create_image( - container_format=container_format, - image_location=image_location, - disk_format=disk_format, - visibility=visibility, - min_disk=min_disk, - min_ram=min_ram) - service.deactivate_image(image.id) - - -@validation.add("enum", param_name="container_format", - values=["ami", "ari", "aki", "bare", "ovf"]) -@validation.add("enum", param_name="disk_format", - values=["ami", "ari", "aki", "vhd", "vmdk", "raw", - "qcow2", "vdi", "iso"]) -@types.convert(image_location={"type": "path_or_url"}, - kwargs={"type": "glance_image_args"}) -@validation.add("required_services", services=[consts.Service.GLANCE]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["glance"]}, - name="GlanceImages.create_and_download_image", - platform="openstack") -class CreateAndDownloadImage(GlanceBasic): - - def run(self, container_format, image_location, disk_format, - visibility="private", min_disk=0, min_ram=0, properties=None): - """Create an image, then download data of the image. - - :param container_format: container format of image. Acceptable - formats: ami, ari, aki, bare, and ovf - :param image_location: image file location - :param disk_format: disk format of image. 
Acceptable formats: - ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso - :param visibility: The access permission for the created image - :param min_disk: The min disk of created images - :param min_ram: The min ram of created images - :param properties: A dict of image metadata properties to set - on the image - """ - image = self.glance.create_image( - container_format=container_format, - image_location=image_location, - disk_format=disk_format, - visibility=visibility, - min_disk=min_disk, - min_ram=min_ram, - properties=properties) - - self.glance.download_image(image.id) diff --git a/rally/plugins/openstack/scenarios/glance/utils.py b/rally/plugins/openstack/scenarios/glance/utils.py deleted file mode 100644 index 1c10af683a..0000000000 --- a/rally/plugins/openstack/scenarios/glance/utils.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import cfg -from rally.common import logging -from rally.plugins.openstack import scenario -from rally.plugins.openstack.wrappers import glance as glance_wrapper -from rally.task import atomic -from rally.task import utils - - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -class GlanceScenario(scenario.OpenStackScenario): - """Base class for Glance scenarios with basic atomic actions.""" - - def __init__(self, context=None, admin_clients=None, clients=None): - super(GlanceScenario, self).__init__(context, admin_clients, clients) - LOG.warning( - "Class %s is deprecated since Rally 0.10.0 and will be removed " - "soon. Use " - "rally.plugins.openstack.services.image.image.Image " - "instead." % self.__class__) - - @atomic.action_timer("glance.list_images") - def _list_images(self): - """Returns user images list.""" - return list(self.clients("glance").images.list()) - - @atomic.action_timer("glance.create_image") - def _create_image(self, container_format, image_location, disk_format, - **kwargs): - """Create a new image. - - :param container_format: container format of image. Acceptable - formats: ami, ari, aki, bare, and ovf - :param image_location: image file location - :param disk_format: disk format of image. Acceptable formats: - ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso - :param kwargs: optional parameters to create image - - :returns: image object - """ - if not kwargs.get("name"): - kwargs["name"] = self.generate_random_name() - client = glance_wrapper.wrap(self._clients.glance, self) - return client.create_image(container_format, image_location, - disk_format, **kwargs) - - @atomic.action_timer("glance.delete_image") - def _delete_image(self, image): - """Deletes given image. - - Returns when the image is actually deleted. 
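# _delete_image below does not return until Glance actually reports the image
# gone: it issues the delete, then polls the image until the lookup fails or
# the status reaches "deleted"/"pending_delete", bounded by a timeout and a
# poll interval. A standalone sketch of that wait-for-deletion loop
# (illustrative only; the real helper delegates to
# rally.task.utils.wait_for_status with check_deletion=True, and the
# "not found" signal here is simplified to LookupError):
import time


def wait_for_deletion(get_resource, timeout=120.0, check_interval=1.0,
                      gone_statuses=("deleted", "pending_delete")):
    """Poll get_resource() until the resource disappears or the wait times out."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        try:
            resource = get_resource()
        except LookupError:  # resource no longer exists
            return
        if getattr(resource, "status", "").lower() in gone_statuses:
            return
        time.sleep(check_interval)
    raise TimeoutError("resource was not deleted within %.0f seconds" % timeout)

# Hypothetical usage: wait_for_deletion(lambda: lookup_image(image_id))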
- - :param image: Image object - """ - self.clients("glance").images.delete(image.id) - wrapper = glance_wrapper.wrap(self._clients.glance, self) - utils.wait_for_status( - image, ["deleted", "pending_delete"], - check_deletion=True, - update_resource=wrapper.get_image, - timeout=CONF.openstack.glance_image_delete_timeout, - check_interval=CONF.openstack.glance_image_delete_poll_interval) diff --git a/rally/plugins/openstack/scenarios/gnocchi/__init__.py b/rally/plugins/openstack/scenarios/gnocchi/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/scenarios/gnocchi/archive_policy_rule.py b/rally/plugins/openstack/scenarios/gnocchi/archive_policy_rule.py deleted file mode 100644 index 98640af4d9..0000000000 --- a/rally/plugins/openstack/scenarios/gnocchi/archive_policy_rule.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright 2017 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.gnocchi import utils as gnocchiutils -from rally.task import validation - -"""Scenarios for Gnocchi archive policy rule.""" - - -@validation.add("required_services", services=[consts.Service.GNOCCHI]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="GnocchiArchivePolicyRule.list_archive_policy_rule") -class ListArchivePolicyRule(gnocchiutils.GnocchiBase): - - def run(self): - """List archive policy rules.""" - self.gnocchi.list_archive_policy_rule() - - -@validation.add("required_services", services=[consts.Service.GNOCCHI]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["gnocchi"]}, - name="GnocchiArchivePolicyRule.create_archive_policy_rule") -class CreateArchivePolicyRule(gnocchiutils.GnocchiBase): - - def run(self, metric_pattern="cpu_*", archive_policy_name="low"): - """Create archive policy rule. - - :param metric_pattern: Pattern for matching metrics - :param archive_policy_name: Archive policy name - """ - name = self.generate_random_name() - self.admin_gnocchi.create_archive_policy_rule( - name, - metric_pattern=metric_pattern, - archive_policy_name=archive_policy_name) - - -@validation.add("required_services", services=[consts.Service.GNOCCHI]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure( - context={"admin_cleanup@openstack": ["gnocchi"]}, - name="GnocchiArchivePolicyRule.create_delete_archive_policy_rule") -class CreateDeleteArchivePolicyRule(gnocchiutils.GnocchiBase): - - def run(self, metric_pattern="cpu_*", archive_policy_name="low"): - """Create archive policy rule and then delete it. 
- - :param metric_pattern: Pattern for matching metrics - :param archive_policy_name: Archive policy name - """ - name = self.generate_random_name() - self.admin_gnocchi.create_archive_policy_rule( - name, - metric_pattern=metric_pattern, - archive_policy_name=archive_policy_name) - self.admin_gnocchi.delete_archive_policy_rule(name) diff --git a/rally/plugins/openstack/scenarios/gnocchi/capabilities.py b/rally/plugins/openstack/scenarios/gnocchi/capabilities.py deleted file mode 100644 index 1f88fb4ef7..0000000000 --- a/rally/plugins/openstack/scenarios/gnocchi/capabilities.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright 2017 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.gnocchi import utils as gnocchiutils -from rally.task import validation - -"""Scenarios for Gnocchi capabilities.""" - - -@validation.add("required_services", - services=[consts.Service.GNOCCHI]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="Gnocchi.list_capabilities") -class ListCapabilities(gnocchiutils.GnocchiBase): - - def run(self): - """List supported aggregation methods.""" - self.gnocchi.list_capabilities() diff --git a/rally/plugins/openstack/scenarios/gnocchi/status.py b/rally/plugins/openstack/scenarios/gnocchi/status.py deleted file mode 100644 index 39357c4b68..0000000000 --- a/rally/plugins/openstack/scenarios/gnocchi/status.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright 2017 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.gnocchi import utils as gnocchiutils -from rally.task import validation - -"""Scenarios for Gnocchi status.""" - - -@validation.add("required_services", - services=[consts.Service.GNOCCHI]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(name="Gnocchi.get_status") -class GetStatus(gnocchiutils.GnocchiBase): - - def run(self, detailed=False): - """Get the status of measurements processing. 
- - :param detailed: get detailed output - """ - self.admin_gnocchi.get_status(detailed) diff --git a/rally/plugins/openstack/scenarios/gnocchi/utils.py b/rally/plugins/openstack/scenarios/gnocchi/utils.py deleted file mode 100644 index 60e91a31bc..0000000000 --- a/rally/plugins/openstack/scenarios/gnocchi/utils.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright 2017 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.plugins.openstack import scenario -from rally.plugins.openstack.services.gnocchi import metric - - -class GnocchiBase(scenario.OpenStackScenario): - """Base class for Gnocchi scenarios with basic atomic actions.""" - - def __init__(self, context=None, admin_clients=None, clients=None): - super(GnocchiBase, self).__init__(context, admin_clients, clients) - if hasattr(self, "_admin_clients"): - self.admin_gnocchi = metric.GnocchiService( - self._admin_clients, name_generator=self.generate_random_name, - atomic_inst=self.atomic_actions()) - if hasattr(self, "_clients"): - self.gnocchi = metric.GnocchiService( - self._clients, name_generator=self.generate_random_name, - atomic_inst=self.atomic_actions()) diff --git a/rally/plugins/openstack/scenarios/heat/__init__.py b/rally/plugins/openstack/scenarios/heat/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/scenarios/heat/stacks.py b/rally/plugins/openstack/scenarios/heat/stacks.py deleted file mode 100644 index d4b40b8f62..0000000000 --- a/rally/plugins/openstack/scenarios/heat/stacks.py +++ /dev/null @@ -1,394 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.heat import utils -from rally.task import atomic -from rally.task import types -from rally.task import validation - - -"""Scenarios for Heat stacks.""" - - -@types.convert(template_path={"type": "file"}, files={"type": "file_dict"}) -@validation.add("required_services", services=[consts.Service.HEAT]) -@validation.add("validate_heat_template", params="template_path") -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["heat"]}, - name="HeatStacks.create_and_list_stack", - platform="openstack") -class CreateAndListStack(utils.HeatScenario): - - def run(self, template_path, parameters=None, - files=None, environment=None): - """Create a stack and then list all stacks. 
- - Measure the "heat stack-create" and "heat stack-list" commands - performance. - - :param template_path: path to stack template file - :param parameters: parameters to use in heat template - :param files: files used in template - :param environment: stack environment definition - """ - stack = self._create_stack(template_path, parameters, - files, environment) - self.assertTrue(stack) - list_stacks = self._list_stacks() - self.assertIn(stack.id, [i.id for i in list_stacks]) - - -@validation.add("required_services", services=[consts.Service.HEAT]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="HeatStacks.list_stacks_and_resources", - platform="openstack") -class ListStacksAndResources(utils.HeatScenario): - - def run(self): - """List all resources from tenant stacks.""" - stacks = self._list_stacks() - for stack in stacks: - with atomic.ActionTimer(self, "heat.list_resources"): - self.clients("heat").resources.list(stack.id) - - -@types.convert(template_path={"type": "file"}, files={"type": "file_dict"}) -@validation.add("required_services", services=[consts.Service.HEAT]) -@validation.add("validate_heat_template", params="template_path") -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["heat"]}, - name="HeatStacks.create_and_delete_stack", - platform="openstack") -class CreateAndDeleteStack(utils.HeatScenario): - - def run(self, template_path, parameters=None, - files=None, environment=None): - """Create and then delete a stack. - - Measure the "heat stack-create" and "heat stack-delete" commands - performance. - - :param template_path: path to stack template file - :param parameters: parameters to use in heat template - :param files: files used in template - :param environment: stack environment definition - """ - - stack = self._create_stack(template_path, parameters, - files, environment) - self._delete_stack(stack) - - -@types.convert(template_path={"type": "file"}, files={"type": "file_dict"}) -@validation.add("required_services", services=[consts.Service.HEAT]) -@validation.add("validate_heat_template", params="template_path") -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["heat"]}, - name="HeatStacks.create_check_delete_stack", - platform="openstack") -class CreateCheckDeleteStack(utils.HeatScenario): - - def run(self, template_path, parameters=None, - files=None, environment=None): - """Create, check and delete a stack. 
- - Measure the performance of the following commands: - - heat stack-create - - heat action-check - - heat stack-delete - - :param template_path: path to stack template file - :param parameters: parameters to use in heat template - :param files: files used in template - :param environment: stack environment definition - """ - - stack = self._create_stack(template_path, parameters, - files, environment) - self._check_stack(stack) - self._delete_stack(stack) - - -@types.convert(template_path={"type": "file"}, - updated_template_path={"type": "file"}, - files={"type": "file_dict"}, - updated_files={"type": "file_dict"}) -@validation.add("required_services", services=[consts.Service.HEAT]) -@validation.add("validate_heat_template", params="template_path") -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["heat"]}, - name="HeatStacks.create_update_delete_stack", - platform="openstack") -class CreateUpdateDeleteStack(utils.HeatScenario): - - def run(self, template_path, updated_template_path, - parameters=None, updated_parameters=None, - files=None, updated_files=None, - environment=None, updated_environment=None): - """Create, update and then delete a stack. - - Measure the "heat stack-create", "heat stack-update" - and "heat stack-delete" commands performance. - - :param template_path: path to stack template file - :param updated_template_path: path to updated stack template file - :param parameters: parameters to use in heat template - :param updated_parameters: parameters to use in updated heat template - If not specified then parameters will be - used instead - :param files: files used in template - :param updated_files: files used in updated template. If not specified - files value will be used instead - :param environment: stack environment definition - :param updated_environment: environment definition for updated stack - """ - - stack = self._create_stack(template_path, parameters, - files, environment) - self._update_stack(stack, updated_template_path, - updated_parameters or parameters, - updated_files or files, - updated_environment or environment) - self._delete_stack(stack) - - -@types.convert(template_path={"type": "file"}, files={"type": "file_dict"}) -@validation.add("required_services", services=[consts.Service.HEAT]) -@validation.add("validate_heat_template", params="template_path") -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["heat"]}, - name="HeatStacks.create_stack_and_scale", - platform="openstack") -class CreateStackAndScale(utils.HeatScenario): - - def run(self, template_path, output_key, delta, - parameters=None, files=None, - environment=None): - """Create an autoscaling stack and invoke a scaling policy. - - Measure the performance of autoscaling webhooks. - - :param template_path: path to template file that includes an - OS::Heat::AutoScalingGroup resource - :param output_key: the stack output key that corresponds to - the scaling webhook - :param delta: the number of instances the stack is expected to - change by. - :param parameters: parameters to use in heat template - :param files: files used in template (dict of file name to - file path) - :param environment: stack environment definition (dict) - """ - # TODO(stpierre): Kilo Heat is *much* better than Juno for the - # requirements of this scenario, so once Juno goes out of - # support we should update this scenario to suck less. 
Namely: - # - # * Kilo Heat can supply alarm_url attributes without needing - # an output key, so instead of getting the output key from - # the user, just get the name of the ScalingPolicy to apply. - # * Kilo Heat changes the status of a stack while scaling it, - # so _scale_stack() can check for the stack to have changed - # size and for it to be in UPDATE_COMPLETE state, so the - # user no longer needs to specify the expected delta. - stack = self._create_stack(template_path, parameters, files, - environment) - self._scale_stack(stack, output_key, delta) - - -@types.convert(template_path={"type": "file"}, files={"type": "file_dict"}) -@validation.add("required_services", services=[consts.Service.HEAT]) -@validation.add("validate_heat_template", params="template_path") -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["heat"]}, - name="HeatStacks.create_suspend_resume_delete_stack", - platform="openstack") -class CreateSuspendResumeDeleteStack(utils.HeatScenario): - - def run(self, template_path, parameters=None, - files=None, environment=None): - """Create, suspend-resume and then delete a stack. - - Measure performance of the following commands: - heat stack-create - heat action-suspend - heat action-resume - heat stack-delete - - :param template_path: path to stack template file - :param parameters: parameters to use in heat template - :param files: files used in template - :param environment: stack environment definition - """ - - s = self._create_stack(template_path, parameters, files, environment) - self._suspend_stack(s) - self._resume_stack(s) - self._delete_stack(s) - - -@validation.add("required_services", services=[consts.Service.HEAT]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="HeatStacks.list_stacks_and_events", - platform="openstack") -class ListStacksAndEvents(utils.HeatScenario): - - def run(self): - """List events from tenant stacks.""" - stacks = self._list_stacks() - for stack in stacks: - with atomic.ActionTimer(self, "heat.list_events"): - self.clients("heat").events.list(stack.id) - - -@types.convert(template_path={"type": "file"}, files={"type": "file_dict"}) -@validation.add("required_services", services=[consts.Service.HEAT]) -@validation.add("validate_heat_template", params="template_path") -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["heat"]}, - name="HeatStacks.create_snapshot_restore_delete_stack", - platform="openstack") -class CreateSnapshotRestoreDeleteStack(utils.HeatScenario): - - def run(self, template_path, parameters=None, - files=None, environment=None): - """Create, snapshot-restore and then delete a stack. 
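# CreateStackAndScale above drives autoscaling by POSTing to a webhook URL that
# the stack publishes as an output, then waiting for the number of
# OS::Nova::Server resources to change by the expected delta (see _scale_stack
# and _stack_webhook in heat/utils.py further down). A standalone sketch of the
# lookup-and-POST step, assuming "outputs" is the list of
# {"output_key": ..., "output_value": ...} dicts a Heat stack exposes
# (illustrative only):
import requests


def post_scaling_webhook(outputs, output_key):
    """Find the webhook URL published under output_key and POST to it."""
    for output in outputs:
        if output["output_key"] == output_key:
            url = output["output_value"]
            break
    else:
        raise ValueError("no output key %r found in stack outputs" % output_key)
    requests.post(url).raise_for_status()

# Hypothetical usage: post_scaling_webhook(stack.outputs, "scale_up_url")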
- - Measure performance of the following commands: - heat stack-create - heat stack-snapshot - heat stack-restore - heat stack-delete - - :param template_path: path to stack template file - :param parameters: parameters to use in heat template - :param files: files used in template - :param environment: stack environment definition - """ - - stack = self._create_stack( - template_path, parameters, files, environment) - snapshot = self._snapshot_stack(stack) - self._restore_stack(stack, snapshot["id"]) - self._delete_stack(stack) - - -@types.convert(template_path={"type": "file"}, files={"type": "file_dict"}) -@validation.add("required_services", services=[consts.Service.HEAT]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["heat"]}, - name="HeatStacks.create_stack_and_show_output_via_API", - platform="openstack") -class CreateStackAndShowOutputViaAPI(utils.HeatScenario): - - def run(self, template_path, output_key, - parameters=None, files=None, environment=None): - """Create stack and show output by using old algorithm. - - Measure performance of the following commands: - heat stack-create - heat output-show - - :param template_path: path to stack template file - :param output_key: the stack output key that corresponds to - the scaling webhook - :param parameters: parameters to use in heat template - :param files: files used in template - :param environment: stack environment definition - """ - stack = self._create_stack( - template_path, parameters, files, environment) - self._stack_show_output_via_API(stack, output_key) - - -@types.convert(template_path={"type": "file"}, files={"type": "file_dict"}) -@validation.add("required_services", services=[consts.Service.HEAT]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["heat"]}, - name="HeatStacks.create_stack_and_show_output", - platform="openstack") -class CreateStackAndShowOutput(utils.HeatScenario): - - def run(self, template_path, output_key, - parameters=None, files=None, environment=None): - """Create stack and show output by using new algorithm. - - Measure performance of the following commands: - heat stack-create - heat output-show - - :param template_path: path to stack template file - :param output_key: the stack output key that corresponds to - the scaling webhook - :param parameters: parameters to use in heat template - :param files: files used in template - :param environment: stack environment definition - """ - stack = self._create_stack( - template_path, parameters, files, environment) - self._stack_show_output(stack, output_key) - - -@types.convert(template_path={"type": "file"}, files={"type": "file_dict"}) -@validation.add("required_services", services=[consts.Service.HEAT]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["heat"]}, - name="HeatStacks.create_stack_and_list_output_via_API", - platform="openstack") -class CreateStackAndListOutputViaAPI(utils.HeatScenario): - - def run(self, template_path, parameters=None, - files=None, environment=None): - """Create stack and list outputs by using old algorithm. 
- - Measure performance of the following commands: - heat stack-create - heat output-list - - :param template_path: path to stack template file - :param parameters: parameters to use in heat template - :param files: files used in template - :param environment: stack environment definition - """ - stack = self._create_stack( - template_path, parameters, files, environment) - self._stack_list_output_via_API(stack) - - -@types.convert(template_path={"type": "file"}, files={"type": "file_dict"}) -@validation.add("required_services", services=[consts.Service.HEAT]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["heat"]}, - name="HeatStacks.create_stack_and_list_output", - platform="openstack") -class CreateStackAndListOutput(utils.HeatScenario): - - def run(self, template_path, parameters=None, - files=None, environment=None): - """Create stack and list outputs by using new algorithm. - - Measure performance of the following commands: - heat stack-create - heat output-list - - :param template_path: path to stack template file - :param parameters: parameters to use in heat template - :param files: files used in template - :param environment: stack environment definition - """ - stack = self._create_stack( - template_path, parameters, files, environment) - self._stack_list_output(stack) diff --git a/rally/plugins/openstack/scenarios/heat/utils.py b/rally/plugins/openstack/scenarios/heat/utils.py deleted file mode 100644 index 5f083f4606..0000000000 --- a/rally/plugins/openstack/scenarios/heat/utils.py +++ /dev/null @@ -1,328 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import requests - -from rally.common import cfg -from rally.common import logging -from rally import exceptions -from rally.plugins.openstack import scenario -from rally.task import atomic -from rally.task import utils - - -LOG = logging.getLogger(__name__) - - -CONF = cfg.CONF - - -class HeatScenario(scenario.OpenStackScenario): - """Base class for Heat scenarios with basic atomic actions.""" - - @atomic.action_timer("heat.list_stacks") - def _list_stacks(self): - """Return user stack list.""" - - return list(self.clients("heat").stacks.list()) - - @atomic.action_timer("heat.create_stack") - def _create_stack(self, template, parameters=None, - files=None, environment=None): - """Create a new stack. - - :param template: template with stack description. 
- :param parameters: template parameters used during stack creation - :param files: additional files used in template - :param environment: stack environment definition - - :returns: object of stack - """ - stack_name = self.generate_random_name() - kw = { - "stack_name": stack_name, - "disable_rollback": True, - "parameters": parameters or {}, - "template": template, - "files": files or {}, - "environment": environment or {} - } - - # heat client returns body instead manager object, so we should - # get manager object using stack_id - stack_id = self.clients("heat").stacks.create(**kw)["stack"]["id"] - stack = self.clients("heat").stacks.get(stack_id) - - self.sleep_between(CONF.openstack.heat_stack_create_prepoll_delay) - - stack = utils.wait_for_status( - stack, - ready_statuses=["CREATE_COMPLETE"], - failure_statuses=["CREATE_FAILED", "ERROR"], - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.heat_stack_create_timeout, - check_interval=CONF.openstack.heat_stack_create_poll_interval) - - return stack - - @atomic.action_timer("heat.update_stack") - def _update_stack(self, stack, template, parameters=None, - files=None, environment=None): - """Update an existing stack - - :param stack: stack that need to be updated - :param template: Updated template - :param parameters: template parameters for stack update - :param files: additional files used in template - :param environment: stack environment definition - - :returns: object of updated stack - """ - - kw = { - "stack_name": stack.stack_name, - "disable_rollback": True, - "parameters": parameters or {}, - "template": template, - "files": files or {}, - "environment": environment or {} - } - self.clients("heat").stacks.update(stack.id, **kw) - - self.sleep_between(CONF.openstack.heat_stack_update_prepoll_delay) - - stack = utils.wait_for_status( - stack, - ready_statuses=["UPDATE_COMPLETE"], - failure_statuses=["UPDATE_FAILED", "ERROR"], - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.heat_stack_update_timeout, - check_interval=CONF.openstack.heat_stack_update_poll_interval) - return stack - - @atomic.action_timer("heat.check_stack") - def _check_stack(self, stack): - """Check given stack. - - Check the stack and stack resources. - - :param stack: stack that needs to be checked - """ - self.clients("heat").actions.check(stack.id) - utils.wait_for_status( - stack, - ready_statuses=["CHECK_COMPLETE"], - failure_statuses=["CHECK_FAILED", "ERROR"], - update_resource=utils.get_from_manager(["CHECK_FAILED"]), - timeout=CONF.openstack.heat_stack_check_timeout, - check_interval=CONF.openstack.heat_stack_check_poll_interval) - - @atomic.action_timer("heat.delete_stack") - def _delete_stack(self, stack): - """Delete given stack. - - Returns when the stack is actually deleted. - - :param stack: stack object - """ - stack.delete() - utils.wait_for_status( - stack, - ready_statuses=["DELETE_COMPLETE"], - failure_statuses=["DELETE_FAILED", "ERROR"], - check_deletion=True, - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.heat_stack_delete_timeout, - check_interval=CONF.openstack.heat_stack_delete_poll_interval) - - @atomic.action_timer("heat.suspend_stack") - def _suspend_stack(self, stack): - """Suspend given stack. 
- - :param stack: stack that needs to be suspended - """ - - self.clients("heat").actions.suspend(stack.id) - utils.wait_for_status( - stack, - ready_statuses=["SUSPEND_COMPLETE"], - failure_statuses=["SUSPEND_FAILED", "ERROR"], - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.heat_stack_suspend_timeout, - check_interval=CONF.openstack.heat_stack_suspend_poll_interval) - - @atomic.action_timer("heat.resume_stack") - def _resume_stack(self, stack): - """Resume given stack. - - :param stack: stack that needs to be resumed - """ - - self.clients("heat").actions.resume(stack.id) - utils.wait_for_status( - stack, - ready_statuses=["RESUME_COMPLETE"], - failure_statuses=["RESUME_FAILED", "ERROR"], - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.heat_stack_resume_timeout, - check_interval=CONF.openstack.heat_stack_resume_poll_interval) - - @atomic.action_timer("heat.snapshot_stack") - def _snapshot_stack(self, stack): - """Creates a snapshot for given stack. - - :param stack: stack that will be used as base for snapshot - :returns: snapshot created for given stack - """ - snapshot = self.clients("heat").stacks.snapshot( - stack.id) - utils.wait_for_status( - stack, - ready_statuses=["SNAPSHOT_COMPLETE"], - failure_statuses=["SNAPSHOT_FAILED", "ERROR"], - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.heat_stack_snapshot_timeout, - check_interval=CONF.openstack.heat_stack_snapshot_poll_interval) - return snapshot - - @atomic.action_timer("heat.restore_stack") - def _restore_stack(self, stack, snapshot_id): - """Restores stack from given snapshot. - - :param stack: stack that will be restored from snapshot - :param snapshot_id: id of given snapshot - """ - self.clients("heat").stacks.restore(stack.id, snapshot_id) - utils.wait_for_status( - stack, - ready_statuses=["RESTORE_COMPLETE"], - failure_statuses=["RESTORE_FAILED", "ERROR"], - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.heat_stack_restore_timeout, - check_interval=CONF.openstack.heat_stack_restore_poll_interval - ) - - @atomic.action_timer("heat.show_output") - def _stack_show_output(self, stack, output_key): - """Execute output_show for specified "output_key". - - This method uses new output API call. - :param stack: stack with output_key output. - :param output_key: The name of the output. - """ - output = self.clients("heat").stacks.output_show(stack.id, output_key) - return output - - @atomic.action_timer("heat.show_output_via_API") - def _stack_show_output_via_API(self, stack, output_key): - """Execute output_show for specified "output_key". - - This method uses old way for getting output value. - It gets whole stack object and then finds necessary "output_key". - :param stack: stack with output_key output. - :param output_key: The name of the output. - """ - # this code copy-pasted and adopted for rally from old client version - # https://github.com/openstack/python-heatclient/blob/0.8.0/heatclient/ - # v1/shell.py#L682-L699 - stack = self.clients("heat").stacks.get(stack_id=stack.id) - for output in stack.to_dict().get("outputs", []): - if output["output_key"] == output_key: - return output - - @atomic.action_timer("heat.list_output") - def _stack_list_output(self, stack): - """Execute output_list for specified "stack". - - This method uses new output API call. - :param stack: stack to call output-list. 
- """ - output_list = self.clients("heat").stacks.output_list(stack.id) - return output_list - - @atomic.action_timer("heat.list_output_via_API") - def _stack_list_output_via_API(self, stack): - """Execute output_list for specified "stack". - - This method uses old way for getting output value. - It gets whole stack object and then prints all outputs - belongs this stack. - :param stack: stack to call output-list. - """ - # this code copy-pasted and adopted for rally from old client version - # https://github.com/openstack/python-heatclient/blob/0.8.0/heatclient/ - # v1/shell.py#L649-L663 - stack = self.clients("heat").stacks.get(stack_id=stack.id) - output_list = stack.to_dict()["outputs"] - return output_list - - def _count_instances(self, stack): - """Count instances in a Heat stack. - - :param stack: stack to count instances in. - """ - return len([ - r for r in self.clients("heat").resources.list(stack.id, - nested_depth=1) - if r.resource_type == "OS::Nova::Server"]) - - def _scale_stack(self, stack, output_key, delta): - """Scale a stack up or down. - - Calls the webhook given in the output value identified by - 'output_key', and waits for the stack size to change by - 'delta'. - - :param stack: stack to scale up or down - :param output_key: The name of the output to get the URL from - :param delta: The expected change in number of instances in - the stack (signed int) - """ - num_instances = self._count_instances(stack) - expected_instances = num_instances + delta - LOG.debug("Scaling stack %s from %s to %s instances with %s" - % (stack.id, num_instances, expected_instances, output_key)) - with atomic.ActionTimer(self, "heat.scale_with_%s" % output_key): - self._stack_webhook(stack, output_key) - utils.wait_for( - stack, - is_ready=lambda s: ( - self._count_instances(s) == expected_instances), - failure_statuses=["UPDATE_FAILED", "ERROR"], - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.heat_stack_scale_timeout, - check_interval=CONF.openstack.heat_stack_scale_poll_interval) - - def _stack_webhook(self, stack, output_key): - """POST to the URL given in the output value identified by output_key. - - This can be used to scale stacks up and down, for instance. - - :param stack: stack to call a webhook on - :param output_key: The name of the output to get the URL from - :raises InvalidConfigException: if the output key is not found - """ - url = None - for output in stack.outputs: - if output["output_key"] == output_key: - url = output["output_value"] - break - else: - raise exceptions.InvalidConfigException( - "No output key %(key)s found in stack %(id)s" % - {"key": output_key, "id": stack.id}) - - with atomic.ActionTimer(self, "heat.%s_webhook" % output_key): - requests.post(url).raise_for_status() diff --git a/rally/plugins/openstack/scenarios/ironic/__init__.py b/rally/plugins/openstack/scenarios/ironic/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/scenarios/ironic/nodes.py b/rally/plugins/openstack/scenarios/ironic/nodes.py deleted file mode 100644 index 2fcfd5fefc..0000000000 --- a/rally/plugins/openstack/scenarios/ironic/nodes.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import logging -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.ironic import utils -from rally.task import validation - - -"""Scenarios for ironic nodes.""" - - -@logging.log_deprecated_args("Useless arguments detected", "0.10.0", - ("marker", "limit", "sort_key"), once=True) -@validation.add("required_services", services=[consts.Service.IRONIC]) -@validation.add("restricted_parameters", param_names="name") -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["ironic"]}, - name="IronicNodes.create_and_list_node", - platform="openstack") -class CreateAndListNode(utils.IronicScenario): - - def run(self, driver, properties=None, associated=None, maintenance=None, - detail=False, sort_dir=None, marker=None, limit=None, - sort_key=None, **kwargs): - """Create and list nodes. - - :param driver: The name of the driver used to manage this Node. - :param properties: Key/value pair describing the physical - characteristics of the node. - :param associated: Optional argument of list request. Either a Boolean - or a string representation of a Boolean that indicates whether to - return a list of associated (True or "True") or unassociated - (False or "False") nodes. - :param maintenance: Optional argument of list request. Either a Boolean - or a string representation of a Boolean that indicates whether - to return nodes in maintenance mode (True or "True"), or not in - maintenance mode (False or "False"). - :param detail: Optional, boolean whether to return detailed - information about nodes. - :param sort_dir: Optional, direction of sorting, either 'asc' (the - default) or 'desc'. - :param marker: DEPRECATED since Rally 0.10.0 - :param limit: DEPRECATED since Rally 0.10.0 - :param sort_key: DEPRECATED since Rally 0.10.0 - :param kwargs: Optional additional arguments for node creation - """ - - node = self._create_node(driver, properties, **kwargs) - list_nodes = self._list_nodes( - associated=associated, maintenance=maintenance, detail=detail, - sort_dir=sort_dir) - self.assertIn(node.name, [n.name for n in list_nodes]) - - -@validation.add("required_services", services=[consts.Service.IRONIC]) -@validation.add("restricted_parameters", param_names="name") -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["ironic"]}, - name="IronicNodes.create_and_delete_node", - platform="openstack") -class CreateAndDeleteNode(utils.IronicScenario): - - def run(self, driver, properties=None, **kwargs): - """Create and delete node. - - :param driver: The name of the driver used to manage this Node. - :param properties: Key/value pair describing the physical - characteristics of the node. 
- :param kwargs: Optional additional arguments for node creation - """ - node = self._create_node(driver, properties, **kwargs) - self._delete_node(node) diff --git a/rally/plugins/openstack/scenarios/ironic/utils.py b/rally/plugins/openstack/scenarios/ironic/utils.py deleted file mode 100644 index ae45893ae4..0000000000 --- a/rally/plugins/openstack/scenarios/ironic/utils.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import string - -from rally.common import cfg -from rally.plugins.openstack import scenario -from rally.task import atomic -from rally.task import utils - - -CONF = cfg.CONF - - -class IronicScenario(scenario.OpenStackScenario): - """Base class for Ironic scenarios with basic atomic actions.""" - - # NOTE(stpierre): Ironic has two name checkers. The new-style - # checker, in API v1.10+, is quite relaxed and will Just Work with - # the default random name pattern. (See - # https://bugs.launchpad.net/ironic/+bug/1434376.) The old-style - # checker *claims* to implement RFCs 952 and 1123, but it doesn't - # actually. (See https://bugs.launchpad.net/ironic/+bug/1468508 - # for details.) The default RESOURCE_NAME_FORMAT works fine for - # the new-style checker, but the old-style checker only allows - # underscores after the first dot, for reasons that I'm sure are - # entirely obvious, so we have to supply a bespoke format for - # Ironic names. - RESOURCE_NAME_FORMAT = "s-rally-XXXXXXXX-XXXXXXXX" - RESOURCE_NAME_ALLOWED_CHARACTERS = string.ascii_lowercase + string.digits - - @atomic.action_timer("ironic.create_node") - def _create_node(self, driver, properties, **kwargs): - """Create node immediately. - - :param driver: The name of the driver used to manage this Node. - :param properties: Key/value pair describing the physical - characteristics of the node. - :param kwargs: optional parameters to create image - :returns: node object - """ - kwargs["name"] = self.generate_random_name() - node = self.admin_clients("ironic").node.create(driver=driver, - properties=properties, - **kwargs) - - self.sleep_between(CONF.openstack.ironic_node_create_poll_interval) - node = utils.wait_for_status( - node, - ready_statuses=["AVAILABLE"], - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.ironic_node_create_timeout, - check_interval=CONF.openstack.ironic_node_poll_interval, - id_attr="uuid", status_attr="provision_state" - ) - - return node - - @atomic.action_timer("ironic.list_nodes") - def _list_nodes(self, associated=None, maintenance=None, detail=False, - sort_dir=None): - """Return list of nodes. - - :param associated: Optional. Either a Boolean or a string - representation of a Boolean that indicates whether - to return a list of associated (True or "True") or - unassociated (False or "False") nodes. - :param maintenance: Optional. 
Either a Boolean or a string - representation of a Boolean that indicates whether - to return nodes in maintenance mode (True or - "True"), or not in maintenance mode (False or - "False"). - :param detail: Optional, boolean whether to return detailed information - about nodes. - :param sort_dir: Optional, direction of sorting, either 'asc' (the - default) or 'desc'. - :returns: A list of nodes. - """ - return self.admin_clients("ironic").node.list( - associated=associated, maintenance=maintenance, detail=detail, - sort_dir=sort_dir) - - @atomic.action_timer("ironic.delete_node") - def _delete_node(self, node): - """Delete the node with specific id. - - :param node: Ironic node object - """ - self.admin_clients("ironic").node.delete(node.uuid) - - utils.wait_for_status( - node, - ready_statuses=["deleted"], - check_deletion=True, - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.ironic_node_delete_timeout, - check_interval=CONF.openstack.ironic_node_poll_interval, - id_attr="uuid", status_attr="provision_state" - ) diff --git a/rally/plugins/openstack/scenarios/keystone/__init__.py b/rally/plugins/openstack/scenarios/keystone/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/scenarios/keystone/basic.py b/rally/plugins/openstack/scenarios/keystone/basic.py deleted file mode 100755 index 2e43d5bc49..0000000000 --- a/rally/plugins/openstack/scenarios/keystone/basic.py +++ /dev/null @@ -1,434 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import logging -from rally.plugins.openstack import scenario -from rally.plugins.openstack.services.identity import identity -from rally.task import validation - - -class KeystoneBasic(scenario.OpenStackScenario): - """Base class for Keystone scenarios with initialized service object.""" - - def __init__(self, context=None, admin_clients=None, clients=None): - super(KeystoneBasic, self).__init__(context, admin_clients, clients) - if hasattr(self, "_admin_clients"): - self.admin_keystone = identity.Identity( - self._admin_clients, name_generator=self.generate_random_name, - atomic_inst=self.atomic_actions()) - if hasattr(self, "_clients"): - self.keystone = identity.Identity( - self._clients, name_generator=self.generate_random_name, - atomic_inst=self.atomic_actions()) - - -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["keystone"]}, - name="KeystoneBasic.create_user", - platform="openstack") -class CreateUser(KeystoneBasic): - - @logging.log_deprecated_args( - "The 'name_length' argument to create_user is ignored", - "0.1.2", ["name_length"], once=True) - def run(self, name_length=10, **kwargs): - """Create a keystone user with random name. - - :param kwargs: Other optional parameters to create users like - "tenant_id", "enabled". 
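# Illustrative sketch (not part of the deleted module): what the bespoke
# Ironic name format described above ("s-rally-XXXXXXXX-XXXXXXXX", lowercase
# letters and digits only) expands to. This helper is hypothetical and only
# approximates what Rally's random-name generator does with that pattern.
import random
import string


def random_ironic_name(fmt="s-rally-XXXXXXXX-XXXXXXXX",
                       allowed=string.ascii_lowercase + string.digits):
    return "".join(random.choice(allowed) if ch == "X" else ch
                   for ch in fmt)
# e.g. random_ironic_name() -> "s-rally-x3k9q0p2-7wm4n8rt"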
- """ - self.admin_keystone.create_user(**kwargs) - - -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["keystone"]}, - name="KeystoneBasic.create_delete_user", - platform="openstack") -class CreateDeleteUser(KeystoneBasic): - - @logging.log_deprecated_args( - "The 'name_length' argument to create_delete_user is ignored", - "0.1.2", ["name_length"], once=True) - def run(self, name_length=10, **kwargs): - """Create a keystone user with random name and then delete it. - - :param kwargs: Other optional parameters to create users like - "tenant_id", "enabled". - """ - user = self.admin_keystone.create_user(**kwargs) - self.admin_keystone.delete_user(user.id) - - -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["keystone"]}, - name="KeystoneBasic.create_user_set_enabled_and_delete", - platform="openstack") -class CreateUserSetEnabledAndDelete(KeystoneBasic): - - def run(self, enabled=True, **kwargs): - """Create a keystone user, enable or disable it, and delete it. - - :param enabled: Initial state of user 'enabled' flag. The user - will be created with 'enabled' set to this - value, and then it will be toggled. - :param kwargs: Other optional parameters to create user. - """ - user = self.admin_keystone.create_user(enabled=enabled, **kwargs) - self.admin_keystone.update_user(user.id, enabled=(not enabled)) - self.admin_keystone.delete_user(user.id) - - -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["keystone"]}, - name="KeystoneBasic.create_tenant", - platform="openstack") -class CreateTenant(KeystoneBasic): - - @logging.log_deprecated_args( - "The 'name_length' argument to create_tenant is ignored", - "0.1.2", ["name_length"], once=True) - def run(self, name_length=10, **kwargs): - """Create a keystone tenant with random name. - - :param kwargs: Other optional parameters - """ - self.admin_keystone.create_project(**kwargs) - - -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["keystone"]}, - name="KeystoneBasic.authenticate_user_and_validate_token", - platform="openstack") -class AuthenticateUserAndValidateToken(KeystoneBasic): - - def run(self): - """Authenticate and validate a keystone token.""" - token = self.admin_keystone.fetch_token() - self.admin_keystone.validate_token(token) - - -@validation.add("number", param_name="users_per_tenant", minval=1) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["keystone"]}, - name="KeystoneBasic.create_tenant_with_users", - platform="openstack") -class CreateTenantWithUsers(KeystoneBasic): - - @logging.log_deprecated_args( - "The 'name_length' argument to create_tenant_with_users is ignored", - "0.1.2", ["name_length"], once=True) - def run(self, users_per_tenant, name_length=10, **kwargs): - """Create a keystone tenant and several users belonging to it. 
- - :param users_per_tenant: number of users to create for the tenant - :param kwargs: Other optional parameters for tenant creation - :returns: keystone tenant instance - """ - tenant = self.admin_keystone.create_project(**kwargs) - self.admin_keystone.create_users(tenant.id, - number_of_users=users_per_tenant) - - -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["keystone"]}, - name="KeystoneBasic.create_and_list_users", - platform="openstack") -class CreateAndListUsers(KeystoneBasic): - - @logging.log_deprecated_args( - "The 'name_length' argument to create_and_list_users is ignored", - "0.1.2", ["name_length"], once=True) - def run(self, name_length=10, **kwargs): - """Create a keystone user with random name and list all users. - - :param kwargs: Other optional parameters to create users like - "tenant_id", "enabled". - """ - - kwargs.pop("name", None) - self.admin_keystone.create_user(**kwargs) - self.admin_keystone.list_users() - - -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["keystone"]}, - name="KeystoneBasic.create_and_list_tenants", - platform="openstack") -class CreateAndListTenants(KeystoneBasic): - - @logging.log_deprecated_args( - "The 'name_length' argument to create_and_list_tenants is ignored", - "0.1.2", ["name_length"], once=True) - def run(self, name_length=10, **kwargs): - """Create a keystone tenant with random name and list all tenants. - - :param kwargs: Other optional parameters - """ - self.admin_keystone.create_project(**kwargs) - self.admin_keystone.list_projects() - - -@validation.add("required_platform", platform="openstack", - admin=True, users=True) -@scenario.configure(context={"admin_cleanup@openstack": ["keystone"]}, - name="KeystoneBasic.add_and_remove_user_role", - platform="openstack") -class AddAndRemoveUserRole(KeystoneBasic): - - def run(self): - """Create a user role add to a user and disassociate.""" - tenant_id = self.context["tenant"]["id"] - user_id = self.context["user"]["id"] - role = self.admin_keystone.create_role() - self.admin_keystone.add_role(role_id=role.id, user_id=user_id, - project_id=tenant_id) - self.admin_keystone.revoke_role(role.id, user_id=user_id, - project_id=tenant_id) - - -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["keystone"]}, - name="KeystoneBasic.create_and_delete_role", - platform="openstack") -class CreateAndDeleteRole(KeystoneBasic): - - def run(self): - """Create a user role and delete it.""" - role = self.admin_keystone.create_role() - self.admin_keystone.delete_role(role.id) - - -@validation.add("required_platform", platform="openstack", - admin=True, users=True) -@scenario.configure(context={"admin_cleanup@openstack": ["keystone"]}, - name="KeystoneBasic.create_add_and_list_user_roles", - platform="openstack") -class CreateAddAndListUserRoles(KeystoneBasic): - - def run(self): - """Create user role, add it and list user roles for given user.""" - tenant_id = self.context["tenant"]["id"] - user_id = self.context["user"]["id"] - role = self.admin_keystone.create_role() - self.admin_keystone.add_role(user_id=user_id, role_id=role.id, - project_id=tenant_id) - self.admin_keystone.list_roles(user_id=user_id, project_id=tenant_id) - - -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": 
["keystone"]}, - name="KeystoneBasic.get_entities", - platform="openstack") -class GetEntities(KeystoneBasic): - - def run(self, service_name="keystone"): - """Get instance of a tenant, user, role and service by id's. - - An ephemeral tenant, user, and role are each created. By - default, fetches the 'keystone' service. This can be - overridden (for instance, to get the 'Identity Service' - service on older OpenStack), or None can be passed explicitly - to service_name to create a new service and then query it by - ID. - - :param service_name: The name of the service to get by ID; or - None, to create an ephemeral service and - get it by ID. - """ - project = self.admin_keystone.create_project() - user = self.admin_keystone.create_user(project_id=project.id) - role = self.admin_keystone.create_role() - self.admin_keystone.get_project(project.id) - self.admin_keystone.get_user(user.id) - self.admin_keystone.get_role(role.id) - if service_name is None: - service = self.admin_keystone.create_service() - else: - service = self.admin_keystone.get_service_by_name(service_name) - self.admin_keystone.get_service(service.id) - - -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["keystone"]}, - name="KeystoneBasic.create_and_delete_service", - platform="openstack") -class CreateAndDeleteService(KeystoneBasic): - - @logging.log_deprecated_args( - "The 'name' argument to create_and_delete_service will be ignored", - "0.0.5", ["name"]) - def run(self, name=None, service_type=None, description=None): - """Create and delete service. - - :param service_type: type of the service - :param description: description of the service - """ - service = self.admin_keystone.create_service(service_type=service_type, - description=description) - self.admin_keystone.delete_service(service.id) - - -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["keystone"]}, - name="KeystoneBasic.create_update_and_delete_tenant", - platform="openstack") -class CreateUpdateAndDeleteTenant(KeystoneBasic): - - @logging.log_deprecated_args( - "The 'name_length' argument to create_update_and_delete_tenant is " - "ignored", "0.1.2", ["name_length"], once=True) - def run(self, name_length=None, **kwargs): - """Create, update and delete tenant. 
- - :param kwargs: Other optional parameters for tenant creation - """ - project = self.admin_keystone.create_project(**kwargs) - new_name = self.generate_random_name() - new_description = self.generate_random_name() - self.admin_keystone.update_project(project.id, name=new_name, - description=new_description) - self.admin_keystone.delete_project(project.id) - - -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["keystone"]}, - name="KeystoneBasic.create_user_update_password", - platform="openstack") -class CreateUserUpdatePassword(KeystoneBasic): - - @logging.log_deprecated_args( - "The 'name_length' and 'password_length' arguments to " - "create_user_update_password are ignored", - "0.1.2", ["name_length", "password_length"], once=True) - def run(self, name_length=None, password_length=None): - """Create user and update password for that user.""" - user = self.admin_keystone.create_user() - password = self.generate_random_name() - self.admin_keystone.update_user(user.id, password=password) - - -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["keystone"]}, - name="KeystoneBasic.create_and_list_services", - platform="openstack") -class CreateAndListServices(KeystoneBasic): - - @logging.log_deprecated_args( - "The 'name' argument to create_and_list_services will be ignored", - "0.0.5", ["name"]) - def run(self, name=None, service_type=None, description=None): - """Create and list services. - - :param service_type: type of the service - :param description: description of the service - """ - self.admin_keystone.create_service(service_type=service_type, - description=description) - self.admin_keystone.list_services() - - -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["keystone"]}, - name="KeystoneBasic.create_and_list_ec2credentials", - platform="openstack") -class CreateAndListEc2Credentials(KeystoneBasic): - - def run(self): - """Create and List all keystone ec2-credentials.""" - self.keystone.create_ec2credentials( - self.context["user"]["id"], - project_id=self.context["tenant"]["id"]) - self.keystone.list_ec2credentials(self.context["user"]["id"]) - - -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["keystone"]}, - name="KeystoneBasic.create_and_delete_ec2credential", - platform="openstack") -class CreateAndDeleteEc2Credential(KeystoneBasic): - - def run(self): - """Create and delete keystone ec2-credential.""" - creds = self.keystone.create_ec2credentials( - self.context["user"]["id"], - project_id=self.context["tenant"]["id"]) - self.keystone.delete_ec2credential( - self.context["user"]["id"], access=creds.access) - - -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["keystone"]}, - name="KeystoneBasic.create_and_get_role", - platform="openstack") -class CreateAndGetRole(KeystoneBasic): - - def run(self, **kwargs): - """Create a user role and get it detailed information. 
- - :param kwargs: Optional additional arguments for roles creation - """ - role = self.admin_keystone.create_role(**kwargs) - self.admin_keystone.get_role(role.id) - - -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["keystone"]}, - name="KeystoneBasic.create_and_list_roles", - platform="openstack") -class CreateAddListRoles(KeystoneBasic): - - def run(self, create_role_kwargs=None, list_role_kwargs=None): - """Create a role, then list all roles. - - :param create_role_kwargs: Optional additional arguments for - roles create - :param list_role_kwargs: Optional additional arguments for roles list - """ - create_role_kwargs = create_role_kwargs or {} - list_role_kwargs = list_role_kwargs or {} - - role = self.admin_keystone.create_role(**create_role_kwargs) - msg = "Role isn't created" - self.assertTrue(role, err_msg=msg) - all_roles = self.admin_keystone.list_roles(**list_role_kwargs) - msg = ("Created role is not in the" - " list of all available roles") - self.assertIn(role, all_roles, err_msg=msg) - - -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["keystone"]}, - name="KeystoneBasic.create_and_update_user", - platform="openstack") -class CreateAndUpdateUser(KeystoneBasic): - - def run(self, create_user_kwargs=None, update_user_kwargs=None): - """Create user and update the user. - - :param create_user_kwargs: Optional additional arguments for user - creation - :param update_user_kwargs: Optional additional arguments for user - updation - """ - create_user_kwargs = create_user_kwargs or {} - - user = self.admin_keystone.create_user(**create_user_kwargs) - self.admin_keystone.update_user(user.id, **update_user_kwargs) - user_data = self.admin_clients("keystone").users.get(user.id) - - for args in update_user_kwargs: - msg = ("%s isn't updated" % args) - self.assertEqual(getattr(user_data, str(args)), - update_user_kwargs[args], err_msg=msg) diff --git a/rally/plugins/openstack/scenarios/keystone/utils.py b/rally/plugins/openstack/scenarios/keystone/utils.py deleted file mode 100644 index b5fa4d632b..0000000000 --- a/rally/plugins/openstack/scenarios/keystone/utils.py +++ /dev/null @@ -1,299 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import uuid - -from rally.common import logging -from rally.plugins.openstack import scenario -from rally.plugins.openstack.wrappers import keystone as keystone_wrapper -from rally.task import atomic - - -LOG = logging.getLogger(__name__) - - -class KeystoneScenario(scenario.OpenStackScenario): - """Base class for Keystone scenarios with basic atomic actions.""" - - def __init__(self, context=None, admin_clients=None, clients=None): - super(KeystoneScenario, self).__init__(context, admin_clients, clients) - LOG.warning( - "Class %s is deprecated since Rally 0.8.0 and will be removed " - "soon. 
Use " - "rally.plugins.openstack.services.identity.identity.Identity " - "instead." % self.__class__) - - @atomic.action_timer("keystone.create_user") - def _user_create(self, email=None, **kwargs): - """Creates keystone user with random name. - - :param kwargs: Other optional parameters to create users like - "tenant_id", "enabled". - :returns: keystone user instance - """ - name = self.generate_random_name() - # NOTE(boris-42): password and email parameters are required by - # keystone client v2.0. This should be cleanuped - # when we switch to v3. - password = kwargs.pop("password", str(uuid.uuid4())) - email = email or (name + "@rally.me") - return self.admin_clients("keystone").users.create( - name, password=password, email=email, **kwargs) - - @atomic.action_timer("keystone.update_user_enabled") - def _update_user_enabled(self, user, enabled): - """Enable or disable a user. - - :param user: The user to enable or disable - :param enabled: Boolean indicating if the user should be - enabled (True) or disabled (False) - """ - self.admin_clients("keystone").users.update_enabled(user, enabled) - - @atomic.action_timer("keystone.validate_token") - def _token_validate(self, token): - """Validate a token for a user. - - :param token: The token to validate - """ - self.admin_clients("keystone").tokens.validate(token) - - @atomic.action_timer("keystone.token_authenticate") - def _authenticate_token(self, name, password, tenant_id, tenant): - """Authenticate user token. - - :param name: The user username - :param password: User password for authentication - :param tenant_id: Tenant id for authentication - :param tenant: Tenant on which authentication will take place - """ - return self.admin_clients("keystone").tokens.authenticate(name, - tenant_id, - tenant, - password) - - def _resource_delete(self, resource): - """"Delete keystone resource.""" - r = "keystone.delete_%s" % resource.__class__.__name__.lower() - with atomic.ActionTimer(self, r): - resource.delete() - - @atomic.action_timer("keystone.create_tenant") - def _tenant_create(self, **kwargs): - """Creates keystone tenant with random name. - - :param kwargs: Other optional parameters - :returns: keystone tenant instance - """ - name = self.generate_random_name() - return self.admin_clients("keystone").tenants.create(name, **kwargs) - - @atomic.action_timer("keystone.create_service") - def _service_create(self, service_type=None, - description=None): - """Creates keystone service with random name. - - :param service_type: type of the service - :param description: description of the service - :returns: keystone service instance - """ - service_type = service_type or "rally_test_type" - description = description or self.generate_random_name() - return self.admin_clients("keystone").services.create( - self.generate_random_name(), - service_type, description=description) - - @atomic.action_timer("keystone.create_users") - def _users_create(self, tenant, users_per_tenant): - """Adds users to a tenant. - - :param tenant: tenant object - :param users_per_tenant: number of users in per tenant - """ - for i in range(users_per_tenant): - name = self.generate_random_name() - password = name - email = name + "@rally.me" - self.admin_clients("keystone").users.create( - name, password=password, email=email, tenant_id=tenant.id) - - @atomic.action_timer("keystone.create_role") - def _role_create(self, **kwargs): - """Creates keystone user role with random name. 
- - :param **kwargs: Optional additional arguments for roles creation - :returns: keystone user role - """ - admin_clients = keystone_wrapper.wrap(self.admin_clients("keystone")) - - role = admin_clients.create_role( - self.generate_random_name(), **kwargs) - return role - - @atomic.action_timer("keystone.role_delete") - def _role_delete(self, role_id): - """Creates keystone user role with random name. - - :param user_id: id of the role - """ - admin_clients = keystone_wrapper.wrap(self.admin_clients("keystone")) - - admin_clients.delete_role(role_id) - - @atomic.action_timer("keystone.list_users") - def _list_users(self): - """List users.""" - return self.admin_clients("keystone").users.list() - - @atomic.action_timer("keystone.list_tenants") - def _list_tenants(self): - """List tenants.""" - return self.admin_clients("keystone").tenants.list() - - @atomic.action_timer("keystone.service_list") - def _list_services(self): - """List services.""" - return self.admin_clients("keystone").services.list() - - @atomic.action_timer("keystone.list_roles") - def _list_roles_for_user(self, user, tenant): - """List user roles. - - :param user: user for whom roles will be listed - :param tenant: tenant on which user have roles - """ - return self.admin_clients("keystone").roles.roles_for_user( - user, tenant) - - @atomic.action_timer("keystone.add_role") - def _role_add(self, user, role, tenant): - """Add role to a given user on a tenant. - - :param user: user to be assigned the role to - :param role: user role to assign with - :param tenant: tenant on which assignation will take place - """ - self.admin_clients("keystone").roles.add_user_role(user, role, tenant) - - @atomic.action_timer("keystone.remove_role") - def _role_remove(self, user, role, tenant): - """Dissociate user with role. - - :param user: user to be stripped with role - :param role: role to be dissociated with user - :param tenant: tenant on which assignation took place - """ - self.admin_clients("keystone").roles.remove_user_role(user, - role, tenant) - - @atomic.action_timer("keystone.get_tenant") - def _get_tenant(self, tenant_id): - """Get given tenant. - - :param tenant_id: tenant object - """ - return self.admin_clients("keystone").tenants.get(tenant_id) - - @atomic.action_timer("keystone.get_user") - def _get_user(self, user_id): - """Get given user. - - :param user_id: user object - """ - return self.admin_clients("keystone").users.get(user_id) - - @atomic.action_timer("keystone.get_role") - def _get_role(self, role_id): - """Get given user role. - - :param role_id: user role object - """ - return self.admin_clients("keystone").roles.get(role_id) - - @atomic.action_timer("keystone.get_service") - def _get_service(self, service_id): - """Get service with given service id. - - :param service_id: id for service object - """ - return self.admin_clients("keystone").services.get(service_id) - - def _get_service_by_name(self, name): - for i in self._list_services(): - if i.name == name: - return i - - @atomic.action_timer("keystone.delete_service") - def _delete_service(self, service_id): - """Delete service. - - :param service_id: service to be deleted - """ - self.admin_clients("keystone").services.delete(service_id) - - @atomic.action_timer("keystone.update_tenant") - def _update_tenant(self, tenant, description=None): - """Update tenant name and description. 
- - :param tenant: tenant to be updated - :param description: tenant description to be set - """ - name = self.generate_random_name() - description = description or self.generate_random_name() - self.admin_clients("keystone").tenants.update(tenant.id, - name, description) - - @atomic.action_timer("keystone.update_user_password") - def _update_user_password(self, user_id, password): - """Update user password. - - :param user_id: id of the user - :param password: new password - """ - admin_clients = self.admin_clients("keystone") - if admin_clients.version in ["v3"]: - admin_clients.users.update(user_id, password=password) - else: - admin_clients.users.update_password(user_id, password) - - @atomic.action_timer("keystone.create_ec2creds") - def _create_ec2credentials(self, user_id, tenant_id): - """Create ec2credentials. - - :param user_id: User ID for which to create credentials - :param tenant_id: Tenant ID for which to create credentials - - :returns: Created ec2-credentials object - """ - return self.clients("keystone").ec2.create(user_id, tenant_id) - - @atomic.action_timer("keystone.list_ec2creds") - def _list_ec2credentials(self, user_id): - """List of access/secret pairs for a user_id. - - :param user_id: List all ec2-credentials for User ID - - :returns: Return ec2-credentials list - """ - return self.clients("keystone").ec2.list(user_id) - - @atomic.action_timer("keystone.delete_ec2creds") - def _delete_ec2credential(self, user_id, access): - """Delete ec2credential. - - :param user_id: User ID for which to delete credential - :param access: access key for ec2credential to delete - """ - self.clients("keystone").ec2.delete(user_id, access) diff --git a/rally/plugins/openstack/scenarios/magnum/__init__.py b/rally/plugins/openstack/scenarios/magnum/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/scenarios/magnum/cluster_templates.py b/rally/plugins/openstack/scenarios/magnum/cluster_templates.py deleted file mode 100644 index 58399d54c5..0000000000 --- a/rally/plugins/openstack/scenarios/magnum/cluster_templates.py +++ /dev/null @@ -1,46 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.magnum import utils -from rally.task import validation - - -"""Scenarios for Magnum cluster_templates.""" - - -@validation.add("required_services", services=[consts.Service.MAGNUM]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["magnum"]}, - name="MagnumClusterTemplates.list_cluster_templates", - platform="openstack") -class ListClusterTemplates(utils.MagnumScenario): - - def run(self, **kwargs): - """List all cluster_templates. - - Measure the "magnum cluster_template-list" command performance. 
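# Illustrative sketch (not part of the deleted module): the API-version branch
# in _update_user_password above. "keystone_client" is an assumption standing
# in for self.admin_clients("keystone"); the two update calls are the ones the
# deleted code uses for v3 and v2 clients respectively.
def update_password(keystone_client, user_id, password):
    if getattr(keystone_client, "version", None) == "v3":
        keystone_client.users.update(user_id, password=password)
    else:
        keystone_client.users.update_password(user_id, password)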
- - :param limit: (Optional) The maximum number of results to return - per request, if: - - 1) limit > 0, the maximum number of cluster_templates to return. - 2) limit param is NOT specified (None), the number of items - returned respect the maximum imposed by the Magnum API - (see Magnum's api.max_limit option). - :param kwargs: optional additional arguments for cluster_templates - listing - """ - self._list_cluster_templates(**kwargs) diff --git a/rally/plugins/openstack/scenarios/magnum/clusters.py b/rally/plugins/openstack/scenarios/magnum/clusters.py deleted file mode 100644 index 1738ff17ce..0000000000 --- a/rally/plugins/openstack/scenarios/magnum/clusters.py +++ /dev/null @@ -1,83 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.magnum import utils -from rally.plugins.openstack.scenarios.nova import utils as nova_utils -from rally.task import validation - -"""Scenarios for Magnum clusters.""" - - -@validation.add("required_services", services=[consts.Service.MAGNUM]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["magnum.clusters"]}, - name="MagnumClusters.list_clusters", - platform="openstack") -class ListClusters(utils.MagnumScenario): - - def run(self, **kwargs): - """List all clusters. - - Measure the "magnum clusters-list" command performance. - :param limit: (Optional) The maximum number of results to return - per request, if: - - 1) limit > 0, the maximum number of clusters to return. - 2) limit param is NOT specified (None), the number of items - returned respect the maximum imposed by the Magnum API - (see Magnum's api.max_limit option). - - :param kwargs: optional additional arguments for clusters listing - """ - self._list_clusters(**kwargs) - - -@validation.add("required_services", services=[consts.Service.MAGNUM]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure( - context={"cleanup@openstack": ["magnum.clusters", "nova.keypairs"]}, - name="MagnumClusters.create_and_list_clusters", - platform="openstack") -class CreateAndListClusters(utils.MagnumScenario): - - def run(self, node_count, **kwargs): - """create cluster and then list all clusters. - - :param node_count: the cluster node count. 
- :param cluster_template_uuid: optional, if user want to use an existing - cluster_template - :param kwargs: optional additional arguments for cluster creation - """ - cluster_template_uuid = kwargs.get("cluster_template_uuid", None) - if cluster_template_uuid is None: - cluster_template_uuid = self.context["tenant"]["cluster_template"] - else: - del kwargs["cluster_template_uuid"] - - nova_scenario = nova_utils.NovaScenario({ - "user": self.context["user"], - "task": self.context["task"], - "config": {"api_versions": self.context["config"].get( - "api_versions", [])} - }) - keypair = nova_scenario._create_keypair() - - new_cluster = self._create_cluster(cluster_template_uuid, node_count, - keypair=keypair, **kwargs) - self.assertTrue(new_cluster, "Failed to create new cluster") - clusters = self._list_clusters(**kwargs) - self.assertIn(new_cluster.uuid, [cluster.uuid for cluster in clusters], - "New cluster not found in a list of clusters") diff --git a/rally/plugins/openstack/scenarios/magnum/k8s_pods.py b/rally/plugins/openstack/scenarios/magnum/k8s_pods.py deleted file mode 100644 index d3a9dbf859..0000000000 --- a/rally/plugins/openstack/scenarios/magnum/k8s_pods.py +++ /dev/null @@ -1,73 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import yaml - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.magnum import utils -from rally.task import validation - - -"""Scenarios for Kubernetes pods and rcs.""" - - -@validation.add("required_services", services=consts.Service.MAGNUM) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="K8sPods.list_pods", platform="openstack") -class ListPods(utils.MagnumScenario): - - def run(self): - """List all pods. - - """ - self._list_v1pods() - - -@validation.add("required_services", services=consts.Service.MAGNUM) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="K8sPods.create_pods", platform="openstack") -class CreatePods(utils.MagnumScenario): - - def run(self, manifests): - """create pods and wait for them to be ready. - - :param manifests: manifest files used to create the pods - """ - for manifest in manifests: - with open(manifest, "r") as f: - manifest_str = f.read() - manifest = yaml.load(manifest_str) - pod = self._create_v1pod(manifest) - msg = ("Pod isn't created") - self.assertTrue(pod, err_msg=msg) - - -@validation.add("required_services", services=consts.Service.MAGNUM) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="K8sPods.create_rcs", platform="openstack") -class CreateRcs(utils.MagnumScenario): - - def run(self, manifests): - """create rcs and wait for them to be ready. 
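# Illustrative sketch (not part of the deleted module): loading the pod/rc
# manifest files referenced above. The deleted scenarios call yaml.load() on
# each file; yaml.safe_load() shown here is a safer equivalent for plain
# Kubernetes manifests. The helper and its "paths" argument are assumptions.
import yaml


def load_manifests(paths):
    manifests = []
    for path in paths:
        with open(path) as f:
            manifests.append(yaml.safe_load(f))
    return manifests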
- - :param manifests: manifest files use to create the rcs - """ - for manifest in manifests: - with open(manifest, "r") as f: - manifest_str = f.read() - manifest = yaml.load(manifest_str) - rc = self._create_v1rc(manifest) - msg = ("RC isn't created") - self.assertTrue(rc, err_msg=msg) diff --git a/rally/plugins/openstack/scenarios/magnum/utils.py b/rally/plugins/openstack/scenarios/magnum/utils.py deleted file mode 100644 index dca247bcfb..0000000000 --- a/rally/plugins/openstack/scenarios/magnum/utils.py +++ /dev/null @@ -1,271 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import random -import string -import time - -from kubernetes import client as k8s_config -from kubernetes.client import api_client -from kubernetes.client.apis import core_v1_api -from kubernetes.client.rest import ApiException - -from rally.common import cfg -from rally.common import utils as common_utils -from rally import exceptions -from rally.plugins.openstack import scenario -from rally.task import atomic -from rally.task import utils - -CONF = cfg.CONF - - -class MagnumScenario(scenario.OpenStackScenario): - """Base class for Magnum scenarios with basic atomic actions.""" - - @atomic.action_timer("magnum.list_cluster_templates") - def _list_cluster_templates(self, **kwargs): - """Return list of cluster_templates. - - :param limit: (Optional) The maximum number of results to return - per request, if: - - 1) limit > 0, the maximum number of cluster_templates to return. - 2) limit param is NOT specified (None), the number of items - returned respect the maximum imposed by the Magnum API - (see Magnum's api.max_limit option). - :param kwargs: Optional additional arguments for cluster_templates - listing - - :returns: cluster_templates list - """ - - return self.clients("magnum").cluster_templates.list(**kwargs) - - @atomic.action_timer("magnum.create_cluster_template") - def _create_cluster_template(self, **kwargs): - """Create a cluster_template - - :param kwargs: optional additional arguments for cluster_template - creation - :returns: magnum cluster_template - """ - - kwargs["name"] = self.generate_random_name() - - return self.clients("magnum").cluster_templates.create(**kwargs) - - @atomic.action_timer("magnum.get_cluster_template") - def _get_cluster_template(self, cluster_template): - """Return details of the specify cluster template. - - :param cluster_template: ID or name of the cluster template to show - :returns: clustertemplate detail - """ - return self.clients("magnum").cluster_templates.get(cluster_template) - - @atomic.action_timer("magnum.list_clusters") - def _list_clusters(self, limit=None, **kwargs): - """Return list of clusters. - - :param limit: Optional, the maximum number of results to return - per request, if: - - 1) limit > 0, the maximum number of clusters to return. - 2) limit param is NOT specified (None), the number of items - returned respect the maximum imposed by the Magnum API - (see Magnum's api.max_limit option). 
- :param kwargs: Optional additional arguments for clusters listing - - :returns: clusters list - """ - return self.clients("magnum").clusters.list(limit=limit, **kwargs) - - @atomic.action_timer("magnum.create_cluster") - def _create_cluster(self, cluster_template, node_count, **kwargs): - """Create a cluster - - :param cluster_template: cluster_template for the cluster - :param node_count: the cluster node count - :param kwargs: optional additional arguments for cluster creation - :returns: magnum cluster - """ - - name = self.generate_random_name() - cluster = self.clients("magnum").clusters.create( - name=name, cluster_template_id=cluster_template, - node_count=node_count, **kwargs) - - common_utils.interruptable_sleep( - CONF.openstack.magnum_cluster_create_prepoll_delay) - cluster = utils.wait_for_status( - cluster, - ready_statuses=["CREATE_COMPLETE"], - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.magnum_cluster_create_timeout, - check_interval=CONF.openstack.magnum_cluster_create_poll_interval, - id_attr="uuid" - ) - return cluster - - @atomic.action_timer("magnum.get_cluster") - def _get_cluster(self, cluster): - """Return details of the specify cluster. - - :param cluster: ID or name of the cluster to show - :returns: cluster detail - """ - return self.clients("magnum").clusters.get(cluster) - - @atomic.action_timer("magnum.get_ca_certificate") - def _get_ca_certificate(self, cluster_uuid): - """Get CA certificate for this cluster - - :param cluster_uuid: uuid of the cluster - """ - return self.clients("magnum").certificates.get(cluster_uuid) - - @atomic.action_timer("magnum.create_ca_certificate") - def _create_ca_certificate(self, csr_req): - """Send csr to Magnum to have it signed - - :param csr_req: {"cluster_uuid": , "csr": } - """ - return self.clients("magnum").certificates.create(**csr_req) - - def _get_k8s_api_client(self): - cluster_uuid = self.context["tenant"]["cluster"] - cluster = self._get_cluster(cluster_uuid) - cluster_template = self._get_cluster_template( - cluster.cluster_template_id) - key_file = None - cert_file = None - ca_certs = None - if not cluster_template.tls_disabled: - dir = self.context["ca_certs_directory"] - key_file = cluster_uuid + ".key" - key_file = os.path.join(dir, key_file) - cert_file = cluster_uuid + ".crt" - cert_file = os.path.join(dir, cert_file) - ca_certs = cluster_uuid + "_ca.crt" - ca_certs = os.path.join(dir, ca_certs) - if hasattr(k8s_config, "ConfigurationObject"): - # k8sclient < 4.0.0 - config = k8s_config.ConfigurationObject() - else: - config = k8s_config.Configuration() - config.host = cluster.api_address - config.ssl_ca_cert = ca_certs - config.cert_file = cert_file - config.key_file = key_file - client = api_client.ApiClient(config=config) - return core_v1_api.CoreV1Api(client) - - @atomic.action_timer("magnum.k8s_list_v1pods") - def _list_v1pods(self): - """List all pods. - - """ - k8s_api = self._get_k8s_api_client() - return k8s_api.list_node(namespace="default") - - @atomic.action_timer("magnum.k8s_create_v1pod") - def _create_v1pod(self, manifest): - """Create a pod on the specify cluster. 
- - :param manifest: manifest use to create the pod - """ - k8s_api = self._get_k8s_api_client() - podname = manifest["metadata"]["name"] + "-" - for i in range(5): - podname = podname + random.choice(string.ascii_lowercase) - manifest["metadata"]["name"] = podname - - for i in range(150): - try: - k8s_api.create_namespaced_pod(body=manifest, - namespace="default") - break - except ApiException as e: - if e.status != 403: - raise - time.sleep(2) - - start = time.time() - while True: - resp = k8s_api.read_namespaced_pod( - name=podname, namespace="default") - - if resp.status.conditions: - for condition in resp.status.conditions: - if condition.type.lower() == "ready" and \ - condition.status.lower() == "true": - return resp - - if (time.time() - start > CONF.openstack.k8s_pod_create_timeout): - raise exceptions.TimeoutException( - desired_status="Ready", - resource_name=podname, - resource_type="Pod", - resource_id=resp.metadata.uid, - resource_status=resp.status, - timeout=CONF.openstack.k8s_pod_create_timeout) - common_utils.interruptable_sleep( - CONF.openstack.k8s_pod_create_poll_interval) - - @atomic.action_timer("magnum.k8s_list_v1rcs") - def _list_v1rcs(self): - """List all rcs. - - """ - k8s_api = self._get_k8s_api_client() - return k8s_api.list_namespaced_replication_controller( - namespace="default") - - @atomic.action_timer("magnum.k8s_create_v1rc") - def _create_v1rc(self, manifest): - """Create rc on the specify cluster. - - :param manifest: manifest use to create the replication controller - """ - k8s_api = self._get_k8s_api_client() - suffix = "-" - for i in range(5): - suffix = suffix + random.choice(string.ascii_lowercase) - rcname = manifest["metadata"]["name"] + suffix - manifest["metadata"]["name"] = rcname - resp = k8s_api.create_namespaced_replication_controller( - body=manifest, - namespace="default") - expectd_status = resp.spec.replicas - start = time.time() - while True: - resp = k8s_api.read_namespaced_replication_controller( - name=rcname, - namespace="default") - status = resp.status.replicas - if status == expectd_status: - return resp - else: - if time.time() - start > CONF.openstack.k8s_rc_create_timeout: - raise exceptions.TimeoutException( - desired_status=expectd_status, - resource_name=rcname, - resource_type="ReplicationController", - resource_id=resp.metadata.uid, - resource_status=status, - timeout=CONF.openstack.k8s_rc_create_timeout) - common_utils.interruptable_sleep( - CONF.openstack.k8s_rc_create_poll_interval) diff --git a/rally/plugins/openstack/scenarios/manila/__init__.py b/rally/plugins/openstack/scenarios/manila/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/scenarios/manila/shares.py b/rally/plugins/openstack/scenarios/manila/shares.py deleted file mode 100644 index cb525ef1dd..0000000000 --- a/rally/plugins/openstack/scenarios/manila/shares.py +++ /dev/null @@ -1,431 +0,0 @@ -# Copyright 2015 Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
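# Illustrative sketch (not part of the deleted module): the generic
# poll-until-ready pattern used by _create_v1pod and _create_v1rc above.
# "check" is any callable returning True once the resource is ready; the
# defaults and the usage line are assumptions for illustration only.
import time


def wait_until(check, timeout, interval=1.0, what="resource"):
    deadline = time.time() + timeout
    while not check():
        if time.time() > deadline:
            raise TimeoutError("%s not ready after %s seconds" % (what, timeout))
        time.sleep(interval)

# usage (hypothetical): wait_until(lambda: pod_is_ready(name), timeout=600,
#                                  interval=2, what="pod %s" % name)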
- -from rally.common import logging -from rally import consts -from rally.plugins.openstack.context.manila import consts as manila_consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.manila import utils -from rally.task import validation - - -"""Scenarios for Manila shares.""" - - -@validation.add("enum", param_name="share_proto", - values=["NFS", "CIFS", "GLUSTERFS", "HDFS", "CEPHFS"], - case_insensitive=True, missed=False) -@validation.add("required_services", services=[consts.Service.MANILA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["manila"]}, - name="ManilaShares.create_and_delete_share", - platform="openstack") -class CreateAndDeleteShare(utils.ManilaScenario): - - def run(self, share_proto, size=1, min_sleep=0, max_sleep=0, **kwargs): - """Create and delete a share. - - Optional 'min_sleep' and 'max_sleep' parameters allow the scenario - to simulate a pause between share creation and deletion - (of random duration from [min_sleep, max_sleep]). - - :param share_proto: share protocol, valid values are NFS, CIFS, - GlusterFS and HDFS - :param size: share size in GB, should be greater than 0 - :param min_sleep: minimum sleep time in seconds (non-negative) - :param max_sleep: maximum sleep time in seconds (non-negative) - :param kwargs: optional args to create a share - """ - share = self._create_share( - share_proto=share_proto, - size=size, - **kwargs) - self.sleep_between(min_sleep, max_sleep) - self._delete_share(share) - - -@validation.add("required_services", services=[consts.Service.MANILA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="ManilaShares.list_shares", platform="openstack") -class ListShares(utils.ManilaScenario): - - def run(self, detailed=True, search_opts=None): - """Basic scenario for 'share list' operation. - - :param detailed: defines either to return detailed list of - objects or not. - :param search_opts: container of search opts such as - "name", "host", "share_type", etc. - """ - self._list_shares(detailed=detailed, search_opts=search_opts) - - -@validation.add("enum", param_name="share_proto", - values=["NFS", "CIFS", "GLUSTERFS", "HDFS", "CEPHFS"], - case_insensitive=True) -@validation.add("required_services", services=[consts.Service.MANILA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["manila"]}, - name="ManilaShares.create_and_extend_share", - platform="openstack") -class CreateAndExtendShare(utils.ManilaScenario): - def run(self, share_proto, size=1, new_size=2, snapshot_id=None, - description=None, metadata=None, share_network=None, - share_type=None, is_public=False, availability_zone=None, - share_group_id=None): - """Create and extend a share - - :param share_proto: share protocol for new share - available values are NFS, CIFS, CephFS, GlusterFS and HDFS. - :param size: size in GiB - :param new_size: new size of the share in GiB - :param snapshot_id: ID of the snapshot - :param description: description of a share - :param metadata: optional metadata to set on share creation - :param share_network: either instance of ShareNetwork or text with ID - :param share_type: either instance of ShareType or text with ID - :param is_public: whether to set share as public or not. 
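# Illustrative sketch (not part of the deleted module): the randomized pause
# between share creation and deletion described above. Drawing the duration
# uniformly from [min_sleep, max_sleep] matches the docstring's "random
# duration from [min_sleep, max_sleep]"; the helper itself is an assumption.
import random
import time


def sleep_between(min_sleep, max_sleep):
    time.sleep(random.uniform(min_sleep, max_sleep))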
- :param availability_zone: availability zone of the share - :param share_group_id: ID of the share group to which the share - should belong - """ - share = self._create_share( - share_proto=share_proto, - size=size, - snapshot_id=snapshot_id, - description=description, - metadata=metadata, - share_network=share_network, - share_type=share_type, - is_public=is_public, - availability_zone=availability_zone, - share_group_id=share_group_id - ) - self._extend_share(share, new_size) - - -@validation.add("enum", param_name="share_proto", - values=["NFS", "CIFS", "GLUSTERFS", "HDFS", "CEPHFS"], - case_insensitive=True) -@validation.add("required_services", services=[consts.Service.MANILA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["manila"]}, - name="ManilaShares.create_and_shrink_share", - platform="openstack") -class CreateAndShrinkShare(utils.ManilaScenario): - def run(self, share_proto, size=2, new_size=1, snapshot_id=None, - description=None, metadata=None, share_network=None, - share_type=None, is_public=False, availability_zone=None, - share_group_id=None): - """Create and shrink a share - - :param share_proto: share protocol for new share - available values are NFS, CIFS, CephFS, GlusterFS and HDFS. - :param size: size in GiB - :param new_size: new size of the share in GiB - :param snapshot_id: ID of the snapshot - :param description: description of a share - :param metadata: optional metadata to set on share creation - :param share_network: either instance of ShareNetwork or text with ID - :param share_type: either instance of ShareType or text with ID - :param is_public: whether to set share as public or not. - :param availability_zone: availability zone of the share - :param share_group_id: ID of the share group to which the share - should belong - """ - share = self._create_share( - share_proto=share_proto, - size=size, - snapshot_id=snapshot_id, - description=description, - metadata=metadata, - share_network=share_network, - share_type=share_type, - is_public=is_public, - availability_zone=availability_zone, - share_group_id=share_group_id - ) - self._shrink_share(share, new_size) - - -@validation.add("required_services", services=[consts.Service.MANILA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["manila"]}, - name="ManilaShares.create_share_network_and_delete", - platform="openstack") -class CreateShareNetworkAndDelete(utils.ManilaScenario): - - @logging.log_deprecated_args( - "The 'name' argument to create_and_delete_service will be ignored", - "1.1.2", ["name"], once=True) - def run(self, neutron_net_id=None, neutron_subnet_id=None, - nova_net_id=None, name=None, description=None): - """Creates share network and then deletes. 
- - :param neutron_net_id: ID of Neutron network - :param neutron_subnet_id: ID of Neutron subnet - :param nova_net_id: ID of Nova network - :param description: share network description - """ - share_network = self._create_share_network( - neutron_net_id=neutron_net_id, - neutron_subnet_id=neutron_subnet_id, - nova_net_id=nova_net_id, - description=description, - ) - self._delete_share_network(share_network) - - -@validation.add("required_services", services=[consts.Service.MANILA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["manila"]}, - name="ManilaShares.create_share_network_and_list", - platform="openstack") -class CreateShareNetworkAndList(utils.ManilaScenario): - - @logging.log_deprecated_args( - "The 'name' argument to create_and_delete_service will be ignored", - "1.1.2", ["name"], once=True) - def run(self, neutron_net_id=None, neutron_subnet_id=None, - nova_net_id=None, name=None, description=None, - detailed=True, search_opts=None): - """Creates share network and then lists it. - - :param neutron_net_id: ID of Neutron network - :param neutron_subnet_id: ID of Neutron subnet - :param nova_net_id: ID of Nova network - :param description: share network description - :param detailed: defines either to return detailed list of - objects or not. - :param search_opts: container of search opts such as - "name", "nova_net_id", "neutron_net_id", etc. - """ - self._create_share_network( - neutron_net_id=neutron_net_id, - neutron_subnet_id=neutron_subnet_id, - nova_net_id=nova_net_id, - description=description, - ) - self._list_share_networks( - detailed=detailed, - search_opts=search_opts, - ) - - -@validation.add("required_services", services=[consts.Service.MANILA]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(name="ManilaShares.list_share_servers", - platform="openstack") -class ListShareServers(utils.ManilaScenario): - - def run(self, search_opts=None): - """Lists share servers. - - Requires admin creds. - - :param search_opts: container of following search opts: - "host", "status", "share_network" and "project_id". - """ - self._list_share_servers(search_opts=search_opts) - - -@validation.add("enum", param_name="share_proto", values=["nfs", "cephfs", - "cifs", "glusterfs", "hdfs"], missed=False, - case_insensitive=True) -@validation.add("required_services", services=[consts.Service.MANILA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure( - context={"cleanup@openstack": ["manila"]}, - name="ManilaShares.create_share_then_allow_and_deny_access") -class CreateShareThenAllowAndDenyAccess(utils.ManilaScenario): - def run(self, share_proto, access_type, access, access_level="rw", size=1, - snapshot_id=None, description=None, metadata=None, - share_network=None, share_type=None, is_public=False, - availability_zone=None, share_group_id=None): - """Create a share and allow and deny access to it - - :param share_proto: share protocol for new share - available values are NFS, CIFS, CephFS, GlusterFS and HDFS. - :param access_type: represents the access type (e.g: 'ip', 'domain'...) - :param access: represents the object (e.g: '127.0.0.1'...) 
- :param access_level: access level to the share (e.g: 'rw', 'ro') - :param size: size in GiB - :param new_size: new size of the share in GiB - :param snapshot_id: ID of the snapshot - :param description: description of a share - :param metadata: optional metadata to set on share creation - :param share_network: either instance of ShareNetwork or text with ID - :param share_type: either instance of ShareType or text with ID - :param is_public: whether to set share as public or not. - :param availability_zone: availability zone of the share - :param share_group_id: ID of the share group to which the share - should belong - """ - share = self._create_share( - share_proto=share_proto, - size=size, - snapshot_id=snapshot_id, - description=description, - metadata=metadata, - share_network=share_network, - share_type=share_type, - is_public=is_public, - availability_zone=availability_zone, - share_group_id=share_group_id - ) - access_result = self._allow_access_share(share, access_type, access, - access_level) - self._deny_access_share(share, access_result["id"]) - - -@validation.add("required_services", services=[consts.Service.MANILA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["manila"]}, - name="ManilaShares.create_security_service_and_delete", - platform="openstack") -class CreateSecurityServiceAndDelete(utils.ManilaScenario): - - @logging.log_deprecated_args( - "The 'name' argument to create_and_delete_service will be ignored", - "1.1.2", ["name"], once=True) - def run(self, security_service_type, dns_ip=None, server=None, - domain=None, user=None, password=None, - name=None, description=None): - """Creates security service and then deletes. - - :param security_service_type: security service type, permitted values - are 'ldap', 'kerberos' or 'active_directory'. - :param dns_ip: dns ip address used inside tenant's network - :param server: security service server ip address or hostname - :param domain: security service domain - :param user: security identifier used by tenant - :param password: password used by user - :param description: security service description - """ - security_service = self._create_security_service( - security_service_type=security_service_type, - dns_ip=dns_ip, - server=server, - domain=domain, - user=user, - password=password, - description=description, - ) - self._delete_security_service(security_service) - - -@validation.add("required_services", services=[consts.Service.MANILA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure( - context={"cleanup@openstack": ["manila"]}, - name="ManilaShares.attach_security_service_to_share_network", - platform="openstack") -class AttachSecurityServiceToShareNetwork(utils.ManilaScenario): - - def run(self, security_service_type="ldap"): - """Attaches security service to share network. - - :param security_service_type: type of security service to use. - Should be one of following: 'ldap', 'kerberos' or - 'active_directory'. 
- """ - sn = self._create_share_network() - ss = self._create_security_service( - security_service_type=security_service_type) - self._add_security_service_to_share_network(sn, ss) - - -@validation.add("enum", param_name="share_proto", - values=["NFS", "CIFS", "GLUSTERFS", "HDFS", "CEPHFS"], - case_insensitive=True) -@validation.add("required_services", services=[consts.Service.MANILA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["manila"]}, - name="ManilaShares.create_and_list_share", - platform="openstack") -class CreateAndListShare(utils.ManilaScenario): - - def run(self, share_proto, size=1, min_sleep=0, max_sleep=0, detailed=True, - **kwargs): - """Create a share and list all shares. - - Optional 'min_sleep' and 'max_sleep' parameters allow the scenario - to simulate a pause between share creation and list - (of random duration from [min_sleep, max_sleep]). - - :param share_proto: share protocol, valid values are NFS, CIFS, - GlusterFS and HDFS - :param size: share size in GB, should be greater than 0 - :param min_sleep: minimum sleep time in seconds (non-negative) - :param max_sleep: maximum sleep time in seconds (non-negative) - :param detailed: defines whether to get detailed list of shares or not - :param kwargs: optional args to create a share - """ - self._create_share(share_proto=share_proto, size=size, **kwargs) - self.sleep_between(min_sleep, max_sleep) - self._list_shares(detailed=detailed) - - -@validation.add("number", param_name="sets", minval=1, integer_only=True) -@validation.add("number", param_name="set_size", minval=1, integer_only=True) -@validation.add("number", param_name="key_min_length", minval=1, maxval=256, - integer_only=True) -@validation.add("number", param_name="key_max_length", minval=1, maxval=256, - integer_only=True) -@validation.add("number", param_name="value_min_length", minval=1, maxval=1024, - integer_only=True) -@validation.add("number", param_name="value_max_length", minval=1, maxval=1024, - integer_only=True) -@validation.add("required_services", services=[consts.Service.MANILA]) -@validation.add("required_platform", platform="openstack", users=True) -@validation.add("required_contexts", - contexts=manila_consts.SHARES_CONTEXT_NAME) -@scenario.configure(context={"cleanup@openstack": ["manila"]}, - name="ManilaShares.set_and_delete_metadata", - platform="openstack") -class SetAndDeleteMetadata(utils.ManilaScenario): - - def run(self, sets=10, set_size=3, delete_size=3, - key_min_length=1, key_max_length=256, - value_min_length=1, value_max_length=1024): - """Sets and deletes share metadata. - - This requires a share to be created with the shares - context. Additionally, ``sets * set_size`` must be greater - than or equal to ``deletes * delete_size``. 
- - :param sets: how many set_metadata operations to perform - :param set_size: number of metadata keys to set in each - set_metadata operation - :param delete_size: number of metadata keys to delete in each - delete_metadata operation - :param key_min_length: minimal size of metadata key to set - :param key_max_length: maximum size of metadata key to set - :param value_min_length: minimal size of metadata value to set - :param value_max_length: maximum size of metadata value to set - """ - shares = self.context.get("tenant", {}).get("shares", []) - share = shares[self.context["iteration"] % len(shares)] - - keys = self._set_metadata( - share=share, - sets=sets, - set_size=set_size, - key_min_length=key_min_length, - key_max_length=key_max_length, - value_min_length=value_min_length, - value_max_length=value_max_length) - - self._delete_metadata(share=share, keys=keys, delete_size=delete_size) diff --git a/rally/plugins/openstack/scenarios/manila/utils.py b/rally/plugins/openstack/scenarios/manila/utils.py deleted file mode 100644 index 73b00c75f1..0000000000 --- a/rally/plugins/openstack/scenarios/manila/utils.py +++ /dev/null @@ -1,401 +0,0 @@ -# Copyright 2015 Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import random - -from rally.common import cfg -from rally import exceptions -from rally.plugins.openstack.context.manila import consts -from rally.plugins.openstack import scenario -from rally.task import atomic -from rally.task import utils - - -CONF = cfg.CONF - - -class ManilaScenario(scenario.OpenStackScenario): - """Base class for Manila scenarios with basic atomic actions.""" - - @atomic.action_timer("manila.create_share") - def _create_share(self, share_proto, size=1, **kwargs): - """Create a share. - - :param share_proto: share protocol for new share, - available values are NFS, CIFS, GlusterFS, HDFS and CEPHFS. - :param size: size of a share in GB - :param snapshot_id: ID of the snapshot - :param name: name of new share - :param description: description of a share - :param metadata: optional metadata to set on share creation - :param share_network: either instance of ShareNetwork or str with ID - :param share_type: either instance of ShareType or str with ID - :param is_public: defines whether to set share as public or not. 
- :returns: instance of :class:`Share` - """ - if self.context: - share_networks = self.context.get("tenant", {}).get( - consts.SHARE_NETWORKS_CONTEXT_NAME, {}).get( - "share_networks", []) - if share_networks and not kwargs.get("share_network"): - kwargs["share_network"] = share_networks[ - self.context["iteration"] % len(share_networks)]["id"] - - if not kwargs.get("name"): - kwargs["name"] = self.generate_random_name() - - share = self.clients("manila").shares.create( - share_proto, size, **kwargs) - - self.sleep_between(CONF.openstack.manila_share_create_prepoll_delay) - share = utils.wait_for_status( - share, - ready_statuses=["available"], - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.manila_share_create_timeout, - check_interval=CONF.openstack.manila_share_create_poll_interval, - ) - return share - - @atomic.action_timer("manila.delete_share") - def _delete_share(self, share): - """Delete the given share. - - :param share: :class:`Share` - """ - share.delete() - error_statuses = ("error_deleting", ) - utils.wait_for_status( - share, - ready_statuses=["deleted"], - check_deletion=True, - update_resource=utils.get_from_manager(error_statuses), - timeout=CONF.openstack.manila_share_delete_timeout, - check_interval=CONF.openstack.manila_share_delete_poll_interval) - - def _get_access_from_share(self, share, access_id): - """Get access from share - - :param share: :class: `Share` - :param access_id: The id of the access we want to get - :returns: The access object from the share - :raises GetResourceNotFound: if the access is not in the share - """ - try: - return next(access for access in share.access_list() - if access.id == access_id) - except StopIteration: - raise exceptions.GetResourceNotFound(resource=access_id) - - def _update_resource_in_allow_access_share(self, share, access_id): - """Helper to update resource state in allow_access_share method - - :param share: :class:`Share` - :param access_id: id of the access - :returns: A function to be used in wait_for_status for the update - resource - """ - def _is_created(_): - return self._get_access_from_share(share, access_id) - - return _is_created - - @atomic.action_timer("manila.access_allow_share") - def _allow_access_share(self, share, access_type, access, access_level): - """Allow access to a share - - :param share: :class:`Share` - :param access_type: represents the access type (e.g: 'ip', 'domain'...) - :param access: represents the object (e.g: '127.0.0.1'...) 
- :param access_level: access level to the share (e.g: 'rw', 'ro') - """ - access_result = share.allow(access_type, access, access_level) - # Get access from the list of accesses of the share - access = next(access for access in share.access_list() - if access.id == access_result["id"]) - - fn = self._update_resource_in_allow_access_share(share, - access_result["id"]) - - # We check if the access in that access_list has the active state - utils.wait_for_status( - access, - ready_statuses=["active"], - update_resource=fn, - check_interval=CONF.openstack.manila_access_create_poll_interval, - timeout=CONF.openstack.manila_access_create_timeout) - - return access_result - - def _update_resource_in_deny_access_share(self, share, access_id): - """Helper to update resource state in deny_access_share method - - :param share: :class:`Share` - :param access_id: id of the access - :returns: A function to be used in wait_for_status for the update - resource - """ - def _is_deleted(_): - access = self._get_access_from_share(share, access_id) - return access - - return _is_deleted - - @atomic.action_timer("manila.access_deny_share") - def _deny_access_share(self, share, access_id): - """Deny access to a share - - :param share: :class:`Share` - :param access_id: id of the access to delete - """ - # Get the access element that was created in the first place - access = self._get_access_from_share(share, access_id) - share.deny(access_id) - - fn = self._update_resource_in_deny_access_share(share, - access_id) - - utils.wait_for_status( - access, - ready_statuses=["deleted"], - update_resource=fn, - check_deletion=True, - check_interval=CONF.openstack.manila_access_delete_poll_interval, - timeout=CONF.openstack.manila_access_delete_timeout) - - @atomic.action_timer("manila.list_shares") - def _list_shares(self, detailed=True, search_opts=None): - """Returns user shares list. - - :param detailed: defines either to return detailed list of - objects or not. - :param search_opts: container of search opts such as - "name", "host", "share_type", etc. - """ - return self.clients("manila").shares.list( - detailed=detailed, search_opts=search_opts) - - @atomic.action_timer("manila.extend_share") - def _extend_share(self, share, new_size): - """Extend the given share - - :param share: :class:`Share` - :param new_size: new size of the share - """ - share.extend(new_size) - utils.wait_for_status( - share, - ready_statuses=["available"], - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.manila_share_create_timeout, - check_interval=CONF.openstack.manila_share_create_poll_interval) - - @atomic.action_timer("manila.shrink_share") - def _shrink_share(self, share, new_size): - """Shrink the given share - - :param share: :class:`Share` - :param new_size: new size of the share - """ - share.shrink(new_size) - utils.wait_for_status( - share, - ready_statuses=["available"], - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.manila_share_create_timeout, - check_interval=CONF.openstack.manila_share_create_poll_interval) - - @atomic.action_timer("manila.create_share_network") - def _create_share_network(self, neutron_net_id=None, - neutron_subnet_id=None, - nova_net_id=None, description=None): - """Create share network. 
- - :param neutron_net_id: ID of Neutron network - :param neutron_subnet_id: ID of Neutron subnet - :param nova_net_id: ID of Nova network - :param description: share network description - :returns: instance of :class:`ShareNetwork` - """ - share_network = self.clients("manila").share_networks.create( - neutron_net_id=neutron_net_id, - neutron_subnet_id=neutron_subnet_id, - nova_net_id=nova_net_id, - name=self.generate_random_name(), - description=description) - return share_network - - @atomic.action_timer("manila.delete_share_network") - def _delete_share_network(self, share_network): - """Delete share network. - - :param share_network: instance of :class:`ShareNetwork`. - """ - share_network.delete() - utils.wait_for_status( - share_network, - ready_statuses=["deleted"], - check_deletion=True, - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.manila_share_delete_timeout, - check_interval=CONF.openstack.manila_share_delete_poll_interval) - - @atomic.action_timer("manila.list_share_networks") - def _list_share_networks(self, detailed=True, search_opts=None): - """List share networks. - - :param detailed: defines either to return detailed list of - objects or not. - :param search_opts: container of search opts such as - "project_id" and "name". - :returns: list of instances of :class:`ShareNetwork` - """ - share_networks = self.clients("manila").share_networks.list( - detailed=detailed, search_opts=search_opts) - return share_networks - - @atomic.action_timer("manila.list_share_servers") - def _list_share_servers(self, search_opts=None): - """List share servers. Admin only. - - :param search_opts: set of key-value pairs to filter share servers by. - Example: {"share_network": "share_network_name_or_id"} - :returns: list of instances of :class:`ShareServer` - """ - share_servers = self.admin_clients("manila").share_servers.list( - search_opts=search_opts) - return share_servers - - @atomic.action_timer("manila.create_security_service") - def _create_security_service(self, security_service_type, dns_ip=None, - server=None, domain=None, user=None, - password=None, description=None): - """Create security service. - - 'Security service' is data container in Manila that stores info - about auth services 'Active Directory', 'Kerberos' and catalog - service 'LDAP' that should be used for shares. - - :param security_service_type: security service type, permitted values - are 'ldap', 'kerberos' or 'active_directory'. - :param dns_ip: dns ip address used inside tenant's network - :param server: security service server ip address or hostname - :param domain: security service domain - :param user: security identifier used by tenant - :param password: password used by user - :param description: security service description - :returns: instance of :class:`SecurityService` - """ - security_service = self.clients("manila").security_services.create( - type=security_service_type, - dns_ip=dns_ip, - server=server, - domain=domain, - user=user, - password=password, - name=self.generate_random_name(), - description=description) - return security_service - - @atomic.action_timer("manila.delete_security_service") - def _delete_security_service(self, security_service): - """Delete security service. - - :param security_service: instance of :class:`SecurityService`. 
- """ - security_service.delete() - utils.wait_for_status( - security_service, - ready_statuses=["deleted"], - check_deletion=True, - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.manila_share_delete_timeout, - check_interval=CONF.openstack.manila_share_delete_poll_interval) - - @atomic.action_timer("manila.add_security_service_to_share_network") - def _add_security_service_to_share_network(self, share_network, - security_service): - """Associate given security service with a share network. - - :param share_network: ID or instance of :class:`ShareNetwork`. - :param security_service: ID or instance of :class:`SecurityService`. - :returns: instance of :class:`ShareNetwork`. - """ - share_network = self.clients( - "manila").share_networks.add_security_service( - share_network, security_service) - return share_network - - @atomic.action_timer("manila.set_metadata") - def _set_metadata(self, share, sets=1, set_size=1, - key_min_length=1, key_max_length=256, - value_min_length=1, value_max_length=1024): - """Sets share metadata. - - :param share: the share to set metadata on - :param sets: how many operations to perform - :param set_size: number of metadata keys to set in each operation - :param key_min_length: minimal size of metadata key to set - :param key_max_length: maximum size of metadata key to set - :param value_min_length: minimal size of metadata value to set - :param value_max_length: maximum size of metadata value to set - :returns: A list of keys that were set - :raises exceptions.InvalidArgumentsException: if invalid arguments - were provided. - """ - if not (key_min_length <= key_max_length and - value_min_length <= value_max_length): - raise exceptions.InvalidArgumentsException( - "Min length for keys and values of metadata can not be bigger " - "than maximum length.") - - keys = [] - for i in range(sets): - metadata = {} - for j in range(set_size): - if key_min_length == key_max_length: - key_length = key_min_length - else: - key_length = random.choice( - range(key_min_length, key_max_length)) - if value_min_length == value_max_length: - value_length = value_min_length - else: - value_length = random.choice( - range(value_min_length, value_max_length)) - key = self._generate_random_part(length=key_length) - keys.append(key) - metadata[key] = self._generate_random_part(length=value_length) - self.clients("manila").shares.set_metadata(share["id"], metadata) - - return keys - - @atomic.action_timer("manila.delete_metadata") - def _delete_metadata(self, share, keys, delete_size=3): - """Deletes share metadata. - - :param share: The share to delete metadata from. - :param delete_size: number of metadata keys to delete using one single - call. - :param keys: a list or tuple of keys to choose deletion candidates from - :raises exceptions.InvalidArgumentsException: if invalid arguments - were provided. - """ - if not (isinstance(keys, list) and keys): - raise exceptions.InvalidArgumentsException( - "Param 'keys' should be non-empty 'list'. 
keys = '%s'" % keys) - for i in range(0, len(keys), delete_size): - self.clients("manila").shares.delete_metadata( - share["id"], keys[i:i + delete_size]) diff --git a/rally/plugins/openstack/scenarios/mistral/__init__.py b/rally/plugins/openstack/scenarios/mistral/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/scenarios/mistral/executions.py b/rally/plugins/openstack/scenarios/mistral/executions.py deleted file mode 100644 index 5c9a400bf5..0000000000 --- a/rally/plugins/openstack/scenarios/mistral/executions.py +++ /dev/null @@ -1,107 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json - -import six -import yaml - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.mistral import utils -from rally.task import types -from rally.task import validation - - -"""Scenarios for Mistral execution.""" - - -@validation.add("required_platform", platform="openstack", users=True) -@validation.add("required_services", - services=[consts.Service.MISTRAL]) -@scenario.configure(name="MistralExecutions.list_executions", - platform="openstack") -class ListExecutions(utils.MistralScenario): - - def run(self, marker="", limit=None, sort_keys="", sort_dirs=""): - """Scenario tests the mistral execution-list command. - - This simple scenario tests the Mistral execution-list - command by listing all the executions. - :param marker: the last execution uuid of the previous page; displays - the list of executions after "marker". - :param limit: maximum number of executions to return in a single - result. - :param sort_keys: comma-separated list of sort keys, e.g. "id,description" - :param sort_dirs: comma-separated list of sort directions. - Default: asc. - """ - self._list_executions(marker=marker, limit=limit, - sort_keys=sort_keys, sort_dirs=sort_dirs) - - -@types.convert(definition={"type": "file"}) -@types.convert(params={"type": "file"}) -@types.convert(wf_input={"type": "file"}) -@validation.add("file_exists", param_name="definition") -@validation.add("required_platform", platform="openstack", users=True) -@validation.add("required_services", - services=[consts.Service.MISTRAL]) -@validation.add("workbook_contains_workflow", - workbook_param="definition", - workflow_param="workflow_name") -@scenario.configure(name="MistralExecutions.create_execution_from_workbook", - context={"cleanup@openstack": ["mistral"]}, - platform="openstack") -class CreateExecutionFromWorkbook(utils.MistralScenario): - - def run(self, definition, workflow_name=None, wf_input=None, params=None, - do_delete=False): - """Scenario tests execution creation and deletion. - - This scenario measures the performance of the - "mistral execution-create" and "mistral execution-delete" - commands. - :param definition: string (yaml string) representation of the given file - content (Mistral workbook definition) - :param workflow_name: the name of the workflow to execute.
Should be - one of the workflows in the definition. If no - workflow_name is passed, one of the workflows in - the definition will be taken. - :param wf_input: file containing a json string of mistral workflow - input - :param params: file containing a json string of mistral params - (the string is the place to pass the environment) - :param do_delete: if False, allows checking performance - in "create only" mode. - """ - - wb = self._create_workbook(definition) - wb_def = yaml.safe_load(wb.definition) - - if not workflow_name: - workflow_name = six.next(six.iterkeys(wb_def["workflows"])) - - workflow_identifier = ".".join([wb.name, workflow_name]) - - if not params: - params = {} - else: - params = json.loads(params) - - ex = self._create_execution(workflow_identifier, wf_input, **params) - - if do_delete: - self._delete_workbook(wb.name) - self._delete_execution(ex) diff --git a/rally/plugins/openstack/scenarios/mistral/utils.py b/rally/plugins/openstack/scenarios/mistral/utils.py deleted file mode 100644 index 147c414cee..0000000000 --- a/rally/plugins/openstack/scenarios/mistral/utils.py +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import yaml - -from rally.common import cfg -from rally.plugins.openstack import scenario -from rally.task import atomic -from rally.task import utils - - -CONF = cfg.CONF - - -class MistralScenario(scenario.OpenStackScenario): - """Base class for Mistral scenarios with basic atomic actions.""" - - @atomic.action_timer("mistral.list_workbooks") - def _list_workbooks(self): - """Gets the list of existing workbooks. - - :returns: workbook list - """ - return self.clients("mistral").workbooks.list() - - @atomic.action_timer("mistral.create_workbook") - def _create_workbook(self, definition): - """Create a new workbook. - - :param definition: workbook description in string - (yaml string) format - :returns: workbook object - """ - definition = yaml.safe_load(definition) - definition["name"] = self.generate_random_name() - definition = yaml.safe_dump(definition) - - return self.clients("mistral").workbooks.create(definition) - - @atomic.action_timer("mistral.delete_workbook") - def _delete_workbook(self, wb_name): - """Delete the given workbook. - - :param wb_name: the name of the workbook to be deleted. - """ - self.clients("mistral").workbooks.delete(wb_name) - - @atomic.action_timer("mistral.list_executions") - def _list_executions(self, marker="", limit=None, sort_keys="", - sort_dirs=""): - """Gets the list of existing executions. - - :returns: execution list - """ - - return self.clients("mistral").executions.list( - marker=marker, limit=limit, sort_keys=sort_keys, - sort_dirs=sort_dirs) - - @atomic.action_timer("mistral.create_execution") - def _create_execution(self, workflow_identifier, wf_input=None, **params): - """Create a new execution. - - :param workflow_identifier: name or id of the workflow to execute - :param wf_input: json string of mistral workflow input - :param params: optional mistral params (this is the place to pass - environment). - :returns: execution object - """ - - execution = self.clients("mistral").executions.create( - workflow_identifier, workflow_input=wf_input, **params) - - execution = utils.wait_for_status( - execution, ready_statuses=["SUCCESS"], failure_statuses=["ERROR"], - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.mistral_execution_timeout) - - return execution - - @atomic.action_timer("mistral.delete_execution") - def _delete_execution(self, execution): - """Delete the given execution. - - :param execution: the execution to be deleted. - """ - self.clients("mistral").executions.delete(execution.id) diff --git a/rally/plugins/openstack/scenarios/mistral/workbooks.py b/rally/plugins/openstack/scenarios/mistral/workbooks.py deleted file mode 100644 index 5028837a38..0000000000 --- a/rally/plugins/openstack/scenarios/mistral/workbooks.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.mistral import utils -from rally.task import types -from rally.task import validation - - -"""Scenarios for Mistral workbook.""" - - -@validation.add("required_platform", platform="openstack", users=True) -@validation.add("required_services", - services=[consts.Service.MISTRAL]) -@scenario.configure(name="MistralWorkbooks.list_workbooks", - platform="openstack") -class ListWorkbooks(utils.MistralScenario): - - def run(self): - """Scenario tests the mistral workbook-list command. - - This simple scenario tests the Mistral workbook-list - command by listing all the workbooks. - """ - self._list_workbooks() - - -@types.convert(definition={"type": "file"}) -@validation.add("file_exists", param_name="definition") -@validation.add("required_platform", platform="openstack", users=True) -@validation.add("required_services", - services=[consts.Service.MISTRAL]) -@scenario.configure(context={"cleanup@openstack": ["mistral"]}, - name="MistralWorkbooks.create_workbook", - platform="openstack") -class CreateWorkbook(utils.MistralScenario): - - def run(self, definition, do_delete=False): - """Scenario tests workbook creation and deletion. - - This scenario measures the performance of the - "mistral workbook-create" and "mistral workbook-delete" - commands. - :param definition: string (yaml string) representation of the given - file content (Mistral workbook definition) - :param do_delete: if False, allows checking performance - in "create only" mode.
- """ - wb = self._create_workbook(definition) - - if do_delete: - self._delete_workbook(wb.name) diff --git a/rally/plugins/openstack/scenarios/monasca/__init__.py b/rally/plugins/openstack/scenarios/monasca/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/scenarios/monasca/metrics.py b/rally/plugins/openstack/scenarios/monasca/metrics.py deleted file mode 100644 index 2430c5d59c..0000000000 --- a/rally/plugins/openstack/scenarios/monasca/metrics.py +++ /dev/null @@ -1,36 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.monasca import utils as monascautils -from rally.task import validation - - -"""Scenarios for monasca Metrics API.""" - - -@validation.add("required_services", - services=[consts.Service.MONASCA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="MonascaMetrics.list_metrics", platform="openstack") -class ListMetrics(monascautils.MonascaScenario): - - def run(self, **kwargs): - """Fetch user's metrics. - - :param kwargs: optional arguments for list query: - name, dimensions, start_time, etc - """ - self._list_metrics(**kwargs) diff --git a/rally/plugins/openstack/scenarios/monasca/utils.py b/rally/plugins/openstack/scenarios/monasca/utils.py deleted file mode 100644 index 8b609a1100..0000000000 --- a/rally/plugins/openstack/scenarios/monasca/utils.py +++ /dev/null @@ -1,53 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import random -import time -import uuid - -from rally.common import cfg -from rally.plugins.openstack import scenario -from rally.task import atomic - - -CONF = cfg.CONF - - -class MonascaScenario(scenario.OpenStackScenario): - """Base class for Monasca scenarios with basic atomic actions.""" - - @atomic.action_timer("monasca.list_metrics") - def _list_metrics(self, **kwargs): - """Get list of user's metrics. - - :param kwargs: optional arguments for list query: - name, dimensions, start_time, etc - :returns list of monasca metrics - """ - return self.clients("monasca").metrics.list(**kwargs) - - @atomic.action_timer("monasca.create_metrics") - def _create_metrics(self, **kwargs): - """Create user metrics. 
- - :param kwargs: attributes for metric creation: - name, dimension, timestamp, value, etc - """ - timestamp = int(time.time() * 1000) - kwargs.update({"name": self.generate_random_name(), - "timestamp": timestamp, - "value": random.random(), - "value_meta": { - "key": str(uuid.uuid4())[:10]}}) - self.clients("monasca").metrics.create(**kwargs) diff --git a/rally/plugins/openstack/scenarios/murano/__init__.py b/rally/plugins/openstack/scenarios/murano/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/scenarios/murano/environments.py b/rally/plugins/openstack/scenarios/murano/environments.py deleted file mode 100644 index 4676306884..0000000000 --- a/rally/plugins/openstack/scenarios/murano/environments.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.murano import utils -from rally.task import validation - - -"""Scenarios for Murano environments.""" - - -@validation.add("required_services", services=[consts.Service.MURANO]) -@scenario.configure(name="MuranoEnvironments.list_environments", - platform="openstack") -class ListEnvironments(utils.MuranoScenario): - - def run(self): - """List the murano environments. - - Run murano environment-list for listing all environments. - """ - self._list_environments() - - -@validation.add("required_services", services=[consts.Service.MURANO]) -@scenario.configure(context={"cleanup@openstack": ["murano.environments"]}, - name="MuranoEnvironments.create_and_delete_environment", - platform="openstack") -class CreateAndDeleteEnvironment(utils.MuranoScenario): - - def run(self): - """Create environment, session and delete environment.""" - environment = self._create_environment() - - self._create_session(environment.id) - self._delete_environment(environment) - - -@validation.add("required_services", services=[consts.Service.MURANO]) -@validation.add("required_contexts", contexts=("murano_packages")) -@scenario.configure(context={"cleanup@openstack": ["murano"], - "roles@openstack": ["admin"]}, - name="MuranoEnvironments.create_and_deploy_environment", - platform="openstack") -class CreateAndDeployEnvironment(utils.MuranoScenario): - - def run(self, packages_per_env=1): - """Create environment, session and deploy environment. - - Create environment, create session, add app to environment - packages_per_env times, send environment to deploy. 
- - :param packages_per_env: number of packages per environment - """ - environment = self._create_environment() - session = self._create_session(environment.id) - package = self.context["tenant"]["packages"][0] - - for i in range(packages_per_env): - self._create_service(environment, session, - package.fully_qualified_name) - - self._deploy_environment(environment, session) diff --git a/rally/plugins/openstack/scenarios/murano/packages.py b/rally/plugins/openstack/scenarios/murano/packages.py deleted file mode 100644 index d74ba37a7a..0000000000 --- a/rally/plugins/openstack/scenarios/murano/packages.py +++ /dev/null @@ -1,158 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import os - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.murano import utils -from rally.task import types -from rally.task import validation - - -"""Scenarios for Murano packages.""" - - -@types.convert(package={"type": "expand_user_path"}) -@validation.add("file_exists", param_name="package", mode=os.F_OK) -@validation.add("required_services", services=[consts.Service.MURANO]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["murano.packages"]}, - name="MuranoPackages.import_and_list_packages", - platform="openstack") -class ImportAndListPackages(utils.MuranoScenario): - - def run(self, package, include_disabled=False): - """Import Murano package and get list of packages. - - Measure the "murano import-package" and "murano package-list" commands - performance. - It imports Murano package from "package" (if it is not a zip archive - then zip archive will be prepared) and gets list of imported packages. - - :param package: path to zip archive that represents Murano - application package or absolute path to folder with - package components - :param include_disabled: specifies whether the disabled packages will - be included in a the result or not. - Default value is False. - """ - package_path = self._zip_package(package) - try: - self._import_package(package_path) - self._list_packages(include_disabled=include_disabled) - finally: - os.remove(package_path) - - -@types.convert(package={"type": "expand_user_path"}) -@validation.add("file_exists", param_name="package", mode=os.F_OK) -@validation.add("required_services", services=[consts.Service.MURANO]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["murano.packages"]}, - name="MuranoPackages.import_and_delete_package", - platform="openstack") -class ImportAndDeletePackage(utils.MuranoScenario): - - def run(self, package): - """Import Murano package and then delete it. - - Measure the "murano import-package" and "murano package-delete" - commands performance. - It imports Murano package from "package" (if it is not a zip archive - then zip archive will be prepared) and deletes it. 
- - :param package: path to zip archive that represents Murano - application package or absolute path to folder with - package components - """ - package_path = self._zip_package(package) - try: - package = self._import_package(package_path) - self._delete_package(package) - finally: - os.remove(package_path) - - -@types.convert(package={"type": "expand_user_path"}) -@validation.add("file_exists", param_name="package", mode=os.F_OK) -@validation.add("required_services", services=[consts.Service.MURANO]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["murano.packages"]}, - name="MuranoPackages.package_lifecycle", - platform="openstack") -class PackageLifecycle(utils.MuranoScenario): - - def run(self, package, body, operation="replace"): - """Import Murano package, modify it and then delete it. - - Measure the Murano import, update and delete package - commands performance. - It imports Murano package from "package" (if it is not a zip archive - then zip archive will be prepared), modifies it (using data from - "body") and deletes. - - :param package: path to zip archive that represents Murano - application package or absolute path to folder with - package components - :param body: dict object that defines what package property will be - updated, e.g {"tags": ["tag"]} or {"enabled": "true"} - :param operation: string object that defines the way of how package - property will be updated, allowed operations are - "add", "replace" or "delete". - Default value is "replace". - - """ - package_path = self._zip_package(package) - try: - package = self._import_package(package_path) - self._update_package(package, body, operation) - self._delete_package(package) - finally: - os.remove(package_path) - - -@types.convert(package={"type": "expand_user_path"}) -@validation.add("file_exists", param_name="package", mode=os.F_OK) -@validation.add("required_services", services=[consts.Service.MURANO]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["murano.packages"]}, - name="MuranoPackages.import_and_filter_applications", - platform="openstack") -class ImportAndFilterApplications(utils.MuranoScenario): - - def run(self, package, filter_query): - """Import Murano package and then filter packages by some criteria. - - Measure the performance of package import and package - filtering commands. - It imports Murano package from "package" (if it is not a zip archive - then zip archive will be prepared) and filters packages by some - criteria. - - :param package: path to zip archive that represents Murano - application package or absolute path to folder with - package components - :param filter_query: dict that contains filter criteria, lately it - will be passed as **kwargs to filter method - e.g. {"category": "Web"} - """ - package_path = self._zip_package(package) - try: - self._import_package(package_path) - self._filter_applications(filter_query) - finally: - os.remove(package_path) diff --git a/rally/plugins/openstack/scenarios/murano/utils.py b/rally/plugins/openstack/scenarios/murano/utils.py deleted file mode 100644 index 84ca3c5f91..0000000000 --- a/rally/plugins/openstack/scenarios/murano/utils.py +++ /dev/null @@ -1,261 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import shutil -import tempfile -import uuid -import zipfile - -import yaml - -from rally.common import cfg -from rally.common import fileutils -from rally.common import utils as common_utils -from rally.plugins.openstack import scenario -from rally.task import atomic -from rally.task import utils - - -CONF = cfg.CONF - - -class MuranoScenario(scenario.OpenStackScenario): - """Base class for Murano scenarios with basic atomic actions.""" - - @atomic.action_timer("murano.list_environments") - def _list_environments(self): - """Return environments list.""" - return self.clients("murano").environments.list() - - @atomic.action_timer("murano.create_environment") - def _create_environment(self): - """Create environment. - - :param env_name: String used to name environment - - :returns: Environment instance - """ - env_name = self.generate_random_name() - return self.clients("murano").environments.create({"name": env_name}) - - @atomic.action_timer("murano.delete_environment") - def _delete_environment(self, environment): - """Delete given environment. - - Return when the environment is actually deleted. - - :param environment: Environment instance - """ - self.clients("murano").environments.delete(environment.id) - - @atomic.action_timer("murano.create_session") - def _create_session(self, environment_id): - """Create session for environment with specific id - - :param environment_id: Environment id - :returns: Session instance - """ - return self.clients("murano").sessions.configure(environment_id) - - @atomic.action_timer("murano.create_service") - def _create_service(self, environment, session, full_package_name, - image_name=None, flavor_name=None): - """Create Murano service. - - :param environment: Environment instance - :param session: Session instance - :param full_package_name: full name of the Murano package - :param image_name: Image name - :param flavor_name: Flavor name - :returns: Service instance - """ - app_id = str(uuid.uuid4()) - data = {"?": {"id": app_id, - "type": full_package_name}, - "name": self.generate_random_name()} - - return self.clients("murano").services.post( - environment_id=environment.id, path="/", data=data, - session_id=session.id) - - @atomic.action_timer("murano.deploy_environment") - def _deploy_environment(self, environment, session): - """Deploy environment. - - :param environment: Environment instance - :param session: Session instance - """ - self.clients("murano").sessions.deploy(environment.id, - session.id) - - config = CONF.openstack - utils.wait_for_status( - environment, - ready_statuses=["READY"], - update_resource=utils.get_from_manager(["DEPLOY FAILURE"]), - timeout=config.murano_deploy_environment_timeout, - check_interval=config.murano_deploy_environment_check_interval - ) - - @atomic.action_timer("murano.list_packages") - def _list_packages(self, include_disabled=False): - """Returns packages list. - - :param include_disabled: if "True" then disabled packages will be - included in a the result. - Default value is False. 
- :returns: list of imported packages - """ - return self.clients("murano").packages.list( - include_disabled=include_disabled) - - @atomic.action_timer("murano.import_package") - def _import_package(self, package): - """Import package to the Murano. - - :param package: path to zip archive with Murano application - :returns: imported package - """ - - package = self.clients("murano").packages.create( - {}, {"file": open(package)} - ) - - return package - - @atomic.action_timer("murano.delete_package") - def _delete_package(self, package): - """Delete specified package. - - :param package: package that will be deleted - """ - - self.clients("murano").packages.delete(package.id) - - @atomic.action_timer("murano.update_package") - def _update_package(self, package, body, operation="replace"): - """Update specified package. - - :param package: package that will be updated - :param body: dict object that defines what package property will be - updated, e.g {"tags": ["tag"]} or {"enabled": "true"} - :param operation: string object that defines the way of how package - property will be updated, allowed operations are - "add", "replace" or "delete". - Default value is "replace". - :returns: updated package - """ - - return self.clients("murano").packages.update( - package.id, body, operation) - - @atomic.action_timer("murano.filter_applications") - def _filter_applications(self, filter_query): - """Filter list of uploaded application by specified criteria. - - :param filter_query: dict that contains filter criteria, it - will be passed as **kwargs to filter method - e.g. {"category": "Web"} - :returns: filtered list of packages - """ - - return self.clients("murano").packages.filter(**filter_query) - - def _zip_package(self, package_path): - """Call _prepare_package method that returns path to zip archive.""" - return MuranoPackageManager(self.task)._prepare_package(package_path) - - -class MuranoPackageManager(common_utils.RandomNameGeneratorMixin): - RESOURCE_NAME_FORMAT = "app.rally_XXXXXXXX_XXXXXXXX" - - def __init__(self, task): - self.task = task - - @staticmethod - def _read_from_file(filename): - with open(filename, "r") as f: - read_data = f.read() - return yaml.safe_load(read_data) - - @staticmethod - def _write_to_file(data, filename): - with open(filename, "w") as f: - yaml.safe_dump(data, f) - - def _change_app_fullname(self, app_dir): - """Change application full name. - - To avoid name conflict error during package import (when user - tries to import a few packages into the same tenant) need to change the - application name. For doing this need to replace following parts - in manifest.yaml - from - ... - FullName: app.name - ... - Classes: - app.name: app_class.yaml - to: - ... - FullName: - ... - Classes: - : app_class.yaml - - :param app_dir: path to directory with Murano application context - """ - - new_fullname = self.generate_random_name() - - manifest_file = os.path.join(app_dir, "manifest.yaml") - manifest = self._read_from_file(manifest_file) - - class_file_name = manifest["Classes"][manifest["FullName"]] - - # update manifest.yaml file - del manifest["Classes"][manifest["FullName"]] - manifest["FullName"] = new_fullname - manifest["Classes"][new_fullname] = class_file_name - self._write_to_file(manifest, manifest_file) - - def _prepare_package(self, package_path): - """Check whether the package path is path to zip archive or not. 
- - If package_path is not a path to zip archive but path to Murano - application folder, than method prepares zip archive with Murano - application. It copies directory with Murano app files to temporary - folder, changes manifest.yaml and class file (to avoid '409 Conflict' - errors in Murano) and prepares zip package. - - :param package_path: path to zip archive or directory with package - components - :returns: path to zip archive with Murano application - """ - - if not zipfile.is_zipfile(package_path): - tmp_dir = tempfile.mkdtemp() - pkg_dir = os.path.join(tmp_dir, "package/") - try: - shutil.copytree(os.path.expanduser(package_path), pkg_dir) - - self._change_app_fullname(pkg_dir) - package_path = fileutils.pack_dir(pkg_dir) - - finally: - shutil.rmtree(tmp_dir) - - return package_path diff --git a/rally/plugins/openstack/scenarios/neutron/__init__.py b/rally/plugins/openstack/scenarios/neutron/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/scenarios/neutron/bgpvpn.py b/rally/plugins/openstack/scenarios/neutron/bgpvpn.py deleted file mode 100644 index f3e5a5937b..0000000000 --- a/rally/plugins/openstack/scenarios/neutron/bgpvpn.py +++ /dev/null @@ -1,344 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import random - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.neutron import utils -from rally.task import validation - - -def _create_random_route_target(): - return "{}:{}".format(random.randint(0, 65535), - random.randint(0, 4294967295)) - -"""Scenarios for Neutron Networking-Bgpvpn.""" - - -@validation.add("enum", param_name="bgpvpn_type", values=["l2", "l3"], - missed=True) -@validation.add("required_neutron_extensions", extensions=["bgpvpn"]) -@validation.add("required_platform", platform="openstack", admin=True) -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@scenario.configure(context={"admin_cleanup@openstack": ["neutron"]}, - name="NeutronBGPVPN.create_and_delete_bgpvpns", - platform="openstack") -class CreateAndDeleteBgpvpns(utils.NeutronScenario): - - def run(self, route_targets=None, import_targets=None, - export_targets=None, route_distinguishers=None, bgpvpn_type="l3"): - """Create bgpvpn and delete the bgpvpn. - - Measure the "neutron bgpvpn-create" and neutron bgpvpn-delete - command performance. - - :param route_targets: Route Targets that will be both imported and - used for export - :param import_targets: Additional Route Targets that will be imported - :param export_targets: Additional Route Targets that will be used - for export. - :param route_distinguishers: List of route distinguisher strings - :param bgpvpn_type: type of VPN and the technology behind it. 
- Acceptable formats: l2 and l3 - """ - bgpvpn = self._create_bgpvpn(route_targets=route_targets, - import_targets=import_targets, - export_targets=export_targets, - route_distinguishers=route_distinguishers, - type=bgpvpn_type) - self._delete_bgpvpn(bgpvpn) - - -@validation.add("enum", param_name="bgpvpn_type", values=["l2", "l3"], - missed=True) -@validation.add("required_neutron_extensions", extensions=["bgpvpn"]) -@validation.add("required_services", services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["neutron"]}, - name="NeutronBGPVPN.create_and_list_bgpvpns", - platform="openstack") -class CreateAndListBgpvpns(utils.NeutronScenario): - - def run(self, route_targets=None, import_targets=None, - export_targets=None, route_distinguishers=None, bgpvpn_type="l3"): - """Create a bgpvpn and then list all bgpvpns - - Measure the "neutron bgpvpn-list" command performance. - - :param route_targets: Route Targets that will be both imported and - used for export - :param import_targets: Additional Route Targets that will be imported - :param export_targets: Additional Route Targets that will be used - for export. - :param route_distinguishers: List of route distinguisher strings - :param bgpvpn_type: type of VPN and the technology behind it. - Acceptable formats: l2 and l3 - """ - bgpvpn = self._create_bgpvpn(route_targets=route_targets, - import_targets=import_targets, - export_targets=export_targets, - route_distinguishers=route_distinguishers, - type=bgpvpn_type) - bgpvpns = self._list_bgpvpns() - self.assertIn(bgpvpn["bgpvpn"]["id"], [b["id"] for b in bgpvpns]) - - -@validation.add("enum", param_name="bgpvpn_type", values=["l2", "l3"], - missed=True) -@validation.add("required_neutron_extensions", extensions=["bgpvpn"]) -@validation.add("required_services", services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["neutron"]}, - name="NeutronBGPVPN.create_and_update_bgpvpns", - platform="openstack") -class CreateAndUpdateBgpvpns(utils.NeutronScenario): - - def run(self, update_name=False, route_targets=None, - import_targets=None, export_targets=None, - route_distinguishers=None, updated_route_targets=None, - updated_import_targets=None, updated_export_targets=None, - updated_route_distinguishers=None, bgpvpn_type="l3"): - """Create and Update bgpvpns - - Measure the "neutron bgpvpn-update" command performance. - - :param update_name: bool, whether or not to modify BGP VPN name - :param route_targets: Route Targets that will be both imported - and used for export - :param updated_route_targets: Updated Route Targets that will be both - imported and used for export - :param import_targets: Additional Route Targets that will be imported - :param updated_import_targets: Updated additional Route Targets that - will be imported - :param export_targets: additional Route Targets that will be used - for export. - :param updated_export_targets: Updated additional Route Targets that - will be used for export. - :param route_distinguishers: list of route distinguisher strings - :param updated_route_distinguishers: Updated list of route - distinguisher strings - :param bgpvpn_type: type of VPN and the technology behind it. 
- Acceptable formats: l2 and l3 - """ - create_bgpvpn_args = { - "route_targets": route_targets, - "import_targets": import_targets, - "export_targets": export_targets, - "route_distinguishers": route_distinguishers, - "type": bgpvpn_type - } - bgpvpn = self._create_bgpvpn(**create_bgpvpn_args) - update_bgpvpn_args = { - "update_name": update_name, - "route_targets": updated_route_targets, - "import_targets": updated_import_targets, - "export_targets": updated_export_targets, - "route_distinguishers": updated_route_distinguishers, - } - self._update_bgpvpn(bgpvpn, **update_bgpvpn_args) - - -@validation.add("enum", param_name="bgpvpn_type", values=["l2", "l3"], - missed=True) -@validation.add("required_neutron_extensions", extensions=["bgpvpn"]) -@validation.add("required_services", services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", - admin=True, users=True) -@validation.add("required_contexts", contexts=["network", "servers"]) -@scenario.configure(context={"admin_cleanup@openstack": ["neutron"], - "cleanup@openstack": ["neutron"]}, - name="NeutronBGPVPN.create_bgpvpn_assoc_disassoc_networks", - platform="openstack") -class CreateAndAssociateDissassociateNetworks(utils.NeutronScenario): - - def run(self, route_targets=None, import_targets=None, - export_targets=None, route_distinguishers=None, bgpvpn_type="l3"): - """Associate a network and disassociate it from a BGP VPN. - - Measure the "neutron bgpvpn-create", "neutron bgpvpn-net-assoc-create" - and "neutron bgpvpn-net-assoc-delete" command performance. - - :param route_targets: Route Targets that will be both imported and - used for export - :param import_targets: Additional Route Targets that will be imported - :param export_targets: Additional Route Targets that will be used - for export. - :param route_distinguishers: List of route distinguisher strings - :param bgpvpn_type: type of VPN and the technology behind it. - Acceptable formats: l2 and l3 - """ - networks = self.context.get("tenant", {}).get("networks", []) - network = networks[0] - if not route_targets: - route_targets = _create_random_route_target() - bgpvpn = self._create_bgpvpn(route_targets=route_targets, - import_targets=import_targets, - export_targets=export_targets, - route_distinguishers=route_distinguishers, - type=bgpvpn_type, - tenant_id=network["tenant_id"]) - net_asso = self._create_bgpvpn_network_assoc(bgpvpn, network) - self._delete_bgpvpn_network_assoc(bgpvpn, net_asso) - - -@validation.add("enum", param_name="bgpvpn_type", values=["l2", "l3"], - missed=True) -@validation.add("required_neutron_extensions", extensions=["bgpvpn"]) -@validation.add("required_services", services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", - admin=True, users=True) -@validation.add("required_contexts", contexts=["network", "servers"]) -@scenario.configure(context={"admin_cleanup@openstack": ["neutron"], - "cleanup@openstack": ["neutron"]}, - name="NeutronBGPVPN.create_bgpvpn_assoc_disassoc_routers", - platform="openstack") -class CreateAndAssociateDissassociateRouters(utils.NeutronScenario): - - def run(self, route_targets=None, import_targets=None, - export_targets=None, route_distinguishers=None, bgpvpn_type="l3"): - """Associate a router and disassociate it from a BGP VPN. - - Measure the "neutron bgpvpn-create", - "neutron bgpvpn-router-assoc-create" and - "neutron bgpvpn-router-assoc-delete" command performance. 
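When no route targets are supplied, the association scenarios fall back on _create_random_route_target(), which builds an "ASN:assigned-number" string from a 2-byte AS number and a 4-byte value. A standalone equivalent of that helper, with the same ranges, is:

import random

def random_route_target():
    """Build an "ASN:assigned-number" route target string, as the helper above does."""
    asn = random.randint(0, 65535)          # 2-byte autonomous system number
    value = random.randint(0, 4294967295)   # 4-byte assigned number
    return "%d:%d" % (asn, value)

print(random_route_target())  # e.g. "64512:1729"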
- - :param route_targets: Route Targets that will be both imported and - used for export - :param import_targets: Additional Route Targets that will be imported - :param export_targets: Additional Route Targets that will be used - for export. - :param route_distinguishers: List of route distinguisher strings - :param bgpvpn_type: type of VPN and the technology behind it. - Acceptable formats: l2 and l3 - """ - - router = { - "id": self.context["tenant"]["networks"][0]["router_id"]} - tenant_id = self.context["tenant"]["id"] - if not route_targets: - route_targets = _create_random_route_target() - bgpvpn = self._create_bgpvpn(route_targets=route_targets, - import_targets=import_targets, - export_targets=export_targets, - route_distinguishers=route_distinguishers, - type=bgpvpn_type, - tenant_id=tenant_id) - router_asso = self._create_bgpvpn_router_assoc(bgpvpn, router) - self._delete_bgpvpn_router_assoc(bgpvpn, router_asso) - - -@validation.add("enum", param_name="bgpvpn_type", values=["l2", "l3"], - missed=True) -@validation.add("required_neutron_extensions", extensions=["bgpvpn"]) -@validation.add("required_services", services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", - admin=True, users=True) -@validation.add("required_contexts", contexts=["network", "servers"]) -@scenario.configure(context={"admin_cleanup@openstack": ["neutron"]}, - name="NeutronBGPVPN.create_and_list_networks_associations", - platform="openstack") -class CreateAndListNetworksAssocs(utils.NeutronScenario): - - def run(self, route_targets=None, import_targets=None, - export_targets=None, route_distinguishers=None, bgpvpn_type="l3"): - """Associate a network and list networks associations. - - Measure the "neutron bgpvpn-create", - "neutron bgpvpn-net-assoc-create" and - "neutron bgpvpn-net-assoc-list" command performance. - - :param route_targets: Route Targets that will be both imported and - used for export - :param import_targets: Additional Route Targets that will be imported - :param export_targets: Additional Route Targets that will be used - for export. - :param route_distinguishers: List of route distinguisher strings - :param bgpvpn_type: type of VPN and the technology behind it. 
- Acceptable formats: l2 and l3 - """ - - networks = self.context.get("tenant", {}).get("networks", []) - network = networks[0] - if not route_targets: - route_targets = _create_random_route_target() - bgpvpn = self._create_bgpvpn(route_targets=route_targets, - import_targets=import_targets, - export_targets=export_targets, - route_distinguishers=route_distinguishers, - type=bgpvpn_type, - tenant_id=network["tenant_id"]) - self._create_bgpvpn_network_assoc(bgpvpn, network) - net_assocs = self._list_bgpvpn_network_assocs( - bgpvpn)["network_associations"] - - network_id = network["id"] - msg = ("Network not included into list of associated networks\n" - "Network created: {}\n" - "List of associations: {}").format(network, net_assocs) - list_networks = [net_assoc["network_id"] for net_assoc in net_assocs] - self.assertIn(network_id, list_networks, err_msg=msg) - - -@validation.add("enum", param_name="bgpvpn_type", values=["l2", "l3"], - missed=True) -@validation.add("required_neutron_extensions", extensions=["bgpvpn"]) -@validation.add("required_services", services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", - admin=True, users=True) -@validation.add("required_contexts", contexts=["network", "servers"]) -@scenario.configure(context={"admin_cleanup@openstack": ["neutron"]}, - name="NeutronBGPVPN.create_and_list_routers_associations", - platform="openstack") -class CreateAndListRoutersAssocs(utils.NeutronScenario): - - def run(self, route_targets=None, import_targets=None, - export_targets=None, route_distinguishers=None, bgpvpn_type="l3"): - """Associate a router and list routers associations. - - Measure the "neutron bgpvpn-create", - "neutron bgpvpn-router-assoc-create" and - "neutron bgpvpn-router-assoc-list" command performance. - - :param route_targets: Route Targets that will be both imported and - used for export - :param import_targets: Additional Route Targets that will be imported - :param export_targets: Additional Route Targets that will be used - for export. - :param route_distinguishers: List of route distinguisher strings - :param bgpvpn_type: type of VPN and the technology behind it. - Acceptable formats: l2 and l3 - """ - - router = { - "id": self.context["tenant"]["networks"][0]["router_id"]} - tenant_id = self.context["tenant"]["id"] - if not route_targets: - route_targets = _create_random_route_target() - - bgpvpn = self._create_bgpvpn(route_targets=route_targets, - import_targets=import_targets, - export_targets=export_targets, - route_distinguishers=route_distinguishers, - type=bgpvpn_type, - tenant_id=tenant_id) - self._create_bgpvpn_router_assoc(bgpvpn, router) - router_assocs = self._list_bgpvpn_router_assocs( - bgpvpn)["router_associations"] - - router_id = router["id"] - msg = ("Router not included into list of associated routers\n" - "Router created: {}\n" - "List of associations: {}").format(router, router_assocs) - - list_routers = [r_assoc["router_id"] for r_assoc in router_assocs] - self.assertIn(router_id, list_routers, err_msg=msg) diff --git a/rally/plugins/openstack/scenarios/neutron/loadbalancer_v1.py b/rally/plugins/openstack/scenarios/neutron/loadbalancer_v1.py deleted file mode 100644 index ec7fbc4611..0000000000 --- a/rally/plugins/openstack/scenarios/neutron/loadbalancer_v1.py +++ /dev/null @@ -1,287 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import random - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.neutron import utils -from rally.task import validation - - -"""Scenarios for Neutron Loadbalancer v1.""" - - -@validation.add("restricted_parameters", param_names="subnet_id", - subdict="pool_create_args") -@validation.add("required_neutron_extensions", extensions=["lbaas"]) -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@validation.add("required_contexts", contexts=("network")) -@scenario.configure(context={"cleanup@openstack": ["neutron"]}, - name="NeutronLoadbalancerV1.create_and_list_pools", - platform="openstack") -class CreateAndListPools(utils.NeutronScenario): - - def run(self, pool_create_args=None): - """Create a pool(v1) and then list pools(v1). - - Measure the "neutron lb-pool-list" command performance. - The scenario creates a pool for every subnet and then lists pools. - - :param pool_create_args: dict, POST /lb/pools request options - """ - pool_create_args = pool_create_args or {} - networks = self.context.get("tenant", {}).get("networks", []) - self._create_v1_pools(networks, **pool_create_args) - self._list_v1_pools() - - -@validation.add("restricted_parameters", param_names="subnet_id", - subdict="pool_create_args") -@validation.add("required_neutron_extensions", extensions=["lbaas"]) -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@validation.add("required_contexts", contexts=("network")) -@scenario.configure(context={"cleanup@openstack": ["neutron"]}, - name="NeutronLoadbalancerV1.create_and_delete_pools", - platform="openstack") -class CreateAndDeletePools(utils.NeutronScenario): - - def run(self, pool_create_args=None): - """Create pools(v1) and delete pools(v1). - - Measure the "neutron lb-pool-create" and "neutron lb-pool-delete" - command performance. The scenario creates a pool for every subnet - and then deletes those pools. - - :param pool_create_args: dict, POST /lb/pools request options - """ - pool_create_args = pool_create_args or {} - networks = self.context.get("tenant", {}).get("networks", []) - pools = self._create_v1_pools(networks, **pool_create_args) - for pool in pools: - self._delete_v1_pool(pool["pool"]) - - -@validation.add("restricted_parameters", param_names="subnet_id", - subdict="pool_create_args") -@validation.add("required_neutron_extensions", extensions=["lbaas"]) -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@validation.add("required_contexts", contexts=("network")) -@scenario.configure(context={"cleanup@openstack": ["neutron"]}, - name="NeutronLoadbalancerV1.create_and_update_pools", - platform="openstack") -class CreateAndUpdatePools(utils.NeutronScenario): - - def run(self, pool_update_args=None, pool_create_args=None): - """Create pools(v1) and update pools(v1). 
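The v1 pool scenarios all follow the same fan-out: one pool is created per subnet of every tenant network, with ROUND_ROBIN/HTTP defaults, and the whole batch is then listed, deleted or updated. A standalone sketch of that shape follows; the real work is done by _create_v1_pools in the Neutron utils module, so the dict layout here is an assumption:

def pools_for_networks(networks, **pool_create_args):
    """Build one pool description per subnet, mirroring the per-subnet fan-out above."""
    pools = []
    for network in networks:
        for subnet_id in network.get("subnets", []):
            pool = dict(pool_create_args,
                        subnet_id=subnet_id,
                        lb_method="ROUND_ROBIN",  # defaults used by the scenarios
                        protocol="HTTP")
            pools.append({"pool": pool})
    return pools

networks = [{"subnets": ["subnet-a", "subnet-b"]}, {"subnets": ["subnet-c"]}]
print(len(pools_for_networks(networks, name="rally-pool")))  # 3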
- - Measure the "neutron lb-pool-create" and "neutron lb-pool-update" - command performance. The scenario creates a pool for every subnet - and then update those pools. - - :param pool_create_args: dict, POST /lb/pools request options - :param pool_update_args: dict, POST /lb/pools update options - """ - pool_create_args = pool_create_args or {} - pool_update_args = pool_update_args or {} - networks = self.context.get("tenant", {}).get("networks", []) - pools = self._create_v1_pools(networks, **pool_create_args) - for pool in pools: - self._update_v1_pool(pool, **pool_update_args) - - -@validation.add("restricted_parameters", param_names=["pool_id", "subnet_id"], - subdict="vip_create_args") -@validation.add("required_neutron_extensions", extensions=["lbaas"]) -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@validation.add("required_contexts", contexts=("network")) -@scenario.configure(context={"cleanup@openstack": ["neutron"]}, - name="NeutronLoadbalancerV1.create_and_list_vips", - platform="openstack") -class CreateAndListVips(utils.NeutronScenario): - - def run(self, pool_create_args=None, vip_create_args=None): - """Create a vip(v1) and then list vips(v1). - - Measure the "neutron lb-vip-create" and "neutron lb-vip-list" command - performance. The scenario creates a vip for every pool created and - then lists vips. - - :param vip_create_args: dict, POST /lb/vips request options - :param pool_create_args: dict, POST /lb/pools request options - """ - vip_create_args = vip_create_args or {} - pool_create_args = pool_create_args or {} - networks = self.context.get("tenant", {}).get("networks", []) - pools = self._create_v1_pools(networks, **pool_create_args) - for pool in pools: - self._create_v1_vip(pool, **vip_create_args) - self._list_v1_vips() - - -@validation.add("restricted_parameters", param_names=["pool_id", "subnet_id"], - subdict="vip_create_args") -@validation.add("required_neutron_extensions", extensions=["lbaas"]) -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@validation.add("required_contexts", contexts=("network")) -@scenario.configure(context={"cleanup@openstack": ["neutron"]}, - name="NeutronLoadbalancerV1.create_and_delete_vips", - platform="openstack") -class CreateAndDeleteVips(utils.NeutronScenario): - - def run(self, pool_create_args=None, vip_create_args=None): - """Create a vip(v1) and then delete vips(v1). - - Measure the "neutron lb-vip-create" and "neutron lb-vip-delete" - command performance. The scenario creates a vip for pool and - then deletes those vips. 
- - :param pool_create_args: dict, POST /lb/pools request options - :param vip_create_args: dict, POST /lb/vips request options - """ - vips = [] - pool_create_args = pool_create_args or {} - vip_create_args = vip_create_args or {} - networks = self.context.get("tenant", {}).get("networks", []) - pools = self._create_v1_pools(networks, **pool_create_args) - for pool in pools: - vips.append(self._create_v1_vip(pool, **vip_create_args)) - for vip in vips: - self._delete_v1_vip(vip["vip"]) - - -@validation.add("restricted_parameters", param_names=["pool_id", "subnet_id"], - subdict="vip_create_args") -@validation.add("required_neutron_extensions", extensions=["lbaas"]) -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@validation.add("required_contexts", contexts=("network")) -@scenario.configure(context={"cleanup@openstack": ["neutron"]}, - name="NeutronLoadbalancerV1.create_and_update_vips", - platform="openstack") -class CreateAndUpdateVips(utils.NeutronScenario): - - def run(self, pool_create_args=None, - vip_update_args=None, vip_create_args=None): - """Create vips(v1) and update vips(v1). - - Measure the "neutron lb-vip-create" and "neutron lb-vip-update" - command performance. The scenario creates a pool for every subnet - and then update those pools. - - :param pool_create_args: dict, POST /lb/pools request options - :param vip_create_args: dict, POST /lb/vips request options - :param vip_update_args: dict, POST /lb/vips update options - """ - vips = [] - pool_create_args = pool_create_args or {} - vip_create_args = vip_create_args or {} - vip_update_args = vip_update_args or {} - networks = self.context.get("tenant", {}).get("networks", []) - pools = self._create_v1_pools(networks, **pool_create_args) - for pool in pools: - vips.append(self._create_v1_vip(pool, **vip_create_args)) - for vip in vips: - self._update_v1_vip(vip, **vip_update_args) - - -@validation.add("required_neutron_extensions", extensions=["lbaas"]) -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure( - context={"cleanup@openstack": ["neutron"]}, - name="NeutronLoadbalancerV1.create_and_list_healthmonitors", - platform="openstack") -class CreateAndListHealthmonitors(utils.NeutronScenario): - - def run(self, healthmonitor_create_args=None): - """Create healthmonitors(v1) and list healthmonitors(v1). - - Measure the "neutron lb-healthmonitor-list" command performance. This - scenario creates healthmonitors and lists them. - - :param healthmonitor_create_args: dict, POST /lb/healthmonitors request - options - """ - healthmonitor_create_args = healthmonitor_create_args or {} - self._create_v1_healthmonitor(**healthmonitor_create_args) - self._list_v1_healthmonitors() - - -@validation.add("required_neutron_extensions", extensions=["lbaas"]) -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure( - context={"cleanup@openstack": ["neutron"]}, - name="NeutronLoadbalancerV1.create_and_delete_healthmonitors", - platform="openstack") -class CreateAndDeleteHealthmonitors(utils.NeutronScenario): - - def run(self, healthmonitor_create_args=None): - """Create a healthmonitor(v1) and delete healthmonitors(v1). 
- - Measure the "neutron lb-healthmonitor-create" and "neutron - lb-healthmonitor-delete" command performance. The scenario creates - healthmonitors and deletes those healthmonitors. - - :param healthmonitor_create_args: dict, POST /lb/healthmonitors request - options - """ - healthmonitor_create_args = healthmonitor_create_args or {} - healthmonitor = self._create_v1_healthmonitor( - **healthmonitor_create_args) - self._delete_v1_healthmonitor(healthmonitor["health_monitor"]) - - -@validation.add("required_neutron_extensions", extensions=["lbaas"]) -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure( - context={"cleanup@openstack": ["neutron"]}, - name="NeutronLoadbalancerV1.create_and_update_healthmonitors", - platform="openstack") -class CreateAndUpdateHealthmonitors(utils.NeutronScenario): - - def run(self, healthmonitor_create_args=None, - healthmonitor_update_args=None): - """Create a healthmonitor(v1) and update healthmonitors(v1). - - Measure the "neutron lb-healthmonitor-create" and "neutron - lb-healthmonitor-update" command performance. The scenario creates - healthmonitors and then updates them. - - :param healthmonitor_create_args: dict, POST /lb/healthmonitors request - options - :param healthmonitor_update_args: dict, POST /lb/healthmonitors update - options - """ - healthmonitor_create_args = healthmonitor_create_args or {} - healthmonitor_update_args = healthmonitor_update_args or { - "max_retries": random.choice(range(1, 10))} - healthmonitor = self._create_v1_healthmonitor( - **healthmonitor_create_args) - self._update_v1_healthmonitor(healthmonitor, - **healthmonitor_update_args) diff --git a/rally/plugins/openstack/scenarios/neutron/loadbalancer_v2.py b/rally/plugins/openstack/scenarios/neutron/loadbalancer_v2.py deleted file mode 100755 index c81e6b928f..0000000000 --- a/rally/plugins/openstack/scenarios/neutron/loadbalancer_v2.py +++ /dev/null @@ -1,49 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.neutron import utils -from rally.task import validation - - -"""Scenarios for Neutron Loadbalancer v2.""" - - -@validation.add("required_neutron_extensions", extensions=["lbaasv2"]) -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@validation.add("required_contexts", contexts=("network")) -@scenario.configure(context={"cleanup@openstack": ["neutron"]}, - name="NeutronLoadbalancerV2.create_and_list_loadbalancers", - platform="openstack") -class CreateAndListLoadbalancers(utils.NeutronScenario): - - def run(self, lb_create_args=None): - """Create a loadbalancer(v2) and then list loadbalancers(v2). - - Measure the "neutron lbaas-loadbalancer-list" command performance. 
- The scenario creates a loadbalancer for every subnet and then lists - loadbalancers. - - :param lb_create_args: dict, POST /lbaas/loadbalancers - request options - """ - lb_create_args = lb_create_args or {} - subnets = [] - networks = self.context.get("tenant", {}).get("networks", []) - for network in networks: - subnets.extend(network.get("subnets", [])) - for subnet_id in subnets: - self._create_lbaasv2_loadbalancer(subnet_id, **lb_create_args) - self._list_lbaasv2_loadbalancers() diff --git a/rally/plugins/openstack/scenarios/neutron/network.py b/rally/plugins/openstack/scenarios/neutron/network.py deleted file mode 100644 index bc6a9fc628..0000000000 --- a/rally/plugins/openstack/scenarios/neutron/network.py +++ /dev/null @@ -1,605 +0,0 @@ -# Copyright 2014: Intel Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.neutron import utils -from rally.task import validation - - -"""Scenarios for Neutron.""" - - -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["neutron"]}, - name="NeutronNetworks.create_and_list_networks", - platform="openstack") -class CreateAndListNetworks(utils.NeutronScenario): - - def run(self, network_create_args=None): - """Create a network and then list all networks. - - Measure the "neutron net-list" command performance. - - If you have only 1 user in your context, you will - add 1 network on every iteration. So you will have more - and more networks and will be able to measure the - performance of the "neutron net-list" command depending on - the number of networks owned by users. - - :param network_create_args: dict, POST /v2.0/networks request options - """ - self._create_network(network_create_args or {}) - self._list_networks() - - -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["neutron"]}, - name="NeutronNetworks.create_and_show_network", - platform="openstack") -class CreateAndShowNetwork(utils.NeutronScenario): - - def run(self, network_create_args=None): - """Create a network and show network details. - - Measure the "neutron net-show" command performance. 
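Every step in these scenarios is wrapped in @atomic.action_timer, so the net-show call, for instance, is measured separately from the net-create that precedes it. A rough stdlib analogue of that idea is sketched below; the names are illustrative, not Rally's actual API:

import time
from contextlib import contextmanager

@contextmanager
def action_timer(name, results):
    """Record the duration of the wrapped block under `name` (illustrative analogue)."""
    start = time.monotonic()
    try:
        yield
    finally:
        results.setdefault(name, []).append(time.monotonic() - start)

durations = {}
with action_timer("neutron.list_networks", durations):
    time.sleep(0.01)  # stand-in for the real API call
print(durations)  # {'neutron.list_networks': [~0.01]}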
- - :param network_create_args: dict, POST /v2.0/networks request options - """ - network = self._create_network(network_create_args or {}) - self._show_network(network) - - -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["neutron"]}, - name="NeutronNetworks.create_and_update_networks", - platform="openstack") -class CreateAndUpdateNetworks(utils.NeutronScenario): - - def run(self, network_update_args, network_create_args=None): - """Create and update a network. - - Measure the "neutron net-create and net-update" command performance. - - :param network_update_args: dict, PUT /v2.0/networks update request - :param network_create_args: dict, POST /v2.0/networks request options - """ - network = self._create_network(network_create_args or {}) - self._update_network(network, network_update_args) - - -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@scenario.configure(context={"cleanup@openstack": ["neutron"]}, - name="NeutronNetworks.create_and_delete_networks", - platform="openstack") -class CreateAndDeleteNetworks(utils.NeutronScenario): - - def run(self, network_create_args=None): - """Create and delete a network. - - Measure the "neutron net-create" and "net-delete" command performance. - - :param network_create_args: dict, POST /v2.0/networks request options - """ - network = self._create_network(network_create_args or {}) - self._delete_network(network["network"]) - - -@validation.add("number", param_name="subnets_per_network", minval=1, - integer_only=True) -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["neutron"]}, - name="NeutronNetworks.create_and_list_subnets", - platform="openstack") -class CreateAndListSubnets(utils.NeutronScenario): - - def run(self, network_create_args=None, subnet_create_args=None, - subnet_cidr_start=None, subnets_per_network=1): - """Create and a given number of subnets and list all subnets. - - The scenario creates a network, a given number of subnets and then - lists subnets. - - :param network_create_args: dict, POST /v2.0/networks request - options. Deprecated - :param subnet_create_args: dict, POST /v2.0/subnets request options - :param subnet_cidr_start: str, start value for subnets CIDR - :param subnets_per_network: int, number of subnets for one network - """ - network = self._create_network(network_create_args or {}) - self._create_subnets(network, subnet_create_args, subnet_cidr_start, - subnets_per_network) - self._list_subnets() - - -@validation.add("number", param_name="subnets_per_network", minval=1, - integer_only=True) -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["neutron"]}, - name="NeutronNetworks.create_and_update_subnets", - platform="openstack") -class CreateAndUpdateSubnets(utils.NeutronScenario): - - def run(self, subnet_update_args, network_create_args=None, - subnet_create_args=None, subnet_cidr_start=None, - subnets_per_network=1): - """Create and update a subnet. - - The scenario creates a network, a given number of subnets - and then updates the subnet. This scenario measures the - "neutron subnet-update" command performance. 
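The subnet scenarios take subnet_cidr_start and subnets_per_network and hand each new subnet the next free block. A rough standalone sketch of that sequential allocation, using the stdlib ipaddress module, is shown below; the real generator lives in the Neutron utils, so the details here are assumptions:

import ipaddress

def sequential_cidrs(start_cidr, count):
    """Yield `count` consecutive, equally sized subnets starting at start_cidr."""
    net = ipaddress.ip_network(start_cidr)
    step = net.num_addresses
    for i in range(count):
        yield ipaddress.ip_network((int(net.network_address) + i * step,
                                    net.prefixlen))

print([str(c) for c in sequential_cidrs("10.2.0.0/24", 3)])
# ['10.2.0.0/24', '10.2.1.0/24', '10.2.2.0/24']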
- - :param subnet_update_args: dict, PUT /v2.0/subnets update options - :param network_create_args: dict, POST /v2.0/networks request - options. Deprecated. - :param subnet_create_args: dict, POST /v2.0/subnets request options - :param subnet_cidr_start: str, start value for subnets CIDR - :param subnets_per_network: int, number of subnets for one network - """ - network = self._create_network(network_create_args or {}) - subnets = self._create_subnets(network, subnet_create_args, - subnet_cidr_start, subnets_per_network) - - for subnet in subnets: - self._update_subnet(subnet, subnet_update_args) - - -@validation.add("number", param_name="subnets_per_network", minval=1, - integer_only=True) -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["neutron"]}, - name="NeutronNetworks.create_and_show_subnets", - platform="openstack") -class CreateAndShowSubnets(utils.NeutronScenario): - - def run(self, network_create_args=None, - subnet_create_args=None, subnet_cidr_start=None, - subnets_per_network=1): - """Create and show a subnet details. - - The scenario creates a network, a given number of subnets - and show the subnet details. This scenario measures the - "neutron subnet-show" command performance. - - :param network_create_args: dict, POST /v2.0/networks request - options. - :param subnet_create_args: dict, POST /v2.0/subnets request options - :param subnet_cidr_start: str, start value for subnets CIDR - :param subnets_per_network: int, number of subnets for one network - """ - network = self._get_or_create_network(network_create_args) - subnets = self._create_subnets(network, subnet_create_args, - subnet_cidr_start, subnets_per_network) - - for subnet in subnets: - self._show_subnet(subnet) - - -@validation.add("number", param_name="subnets_per_network", minval=1, - integer_only=True) -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@scenario.configure(context={"cleanup@openstack": ["neutron"]}, - name="NeutronNetworks.create_and_delete_subnets", - platform="openstack") -class CreateAndDeleteSubnets(utils.NeutronScenario): - - def run(self, network_create_args=None, subnet_create_args=None, - subnet_cidr_start=None, subnets_per_network=1): - """Create and delete a given number of subnets. - - The scenario creates a network, a given number of subnets and then - deletes subnets. - - :param network_create_args: dict, POST /v2.0/networks request - options. Deprecated. 
- :param subnet_create_args: dict, POST /v2.0/subnets request options - :param subnet_cidr_start: str, start value for subnets CIDR - :param subnets_per_network: int, number of subnets for one network - """ - network = self._get_or_create_network(network_create_args) - subnets = self._create_subnets(network, subnet_create_args, - subnet_cidr_start, subnets_per_network) - - for subnet in subnets: - self._delete_subnet(subnet) - - -@validation.add("number", param_name="subnets_per_network", minval=1, - integer_only=True) -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["neutron"]}, - name="NeutronNetworks.create_and_list_routers", - platform="openstack") -class CreateAndListRouters(utils.NeutronScenario): - - def run(self, network_create_args=None, subnet_create_args=None, - subnet_cidr_start=None, subnets_per_network=1, - router_create_args=None): - """Create and a given number of routers and list all routers. - - Create a network, a given number of subnets and routers - and then list all routers. - - :param network_create_args: dict, POST /v2.0/networks request - options. Deprecated. - :param subnet_create_args: dict, POST /v2.0/subnets request options - :param subnet_cidr_start: str, start value for subnets CIDR - :param subnets_per_network: int, number of subnets for one network - :param router_create_args: dict, POST /v2.0/routers request options - """ - self._create_network_structure(network_create_args, subnet_create_args, - subnet_cidr_start, subnets_per_network, - router_create_args) - self._list_routers() - - -@validation.add("number", param_name="subnets_per_network", minval=1, - integer_only=True) -@validation.add("required_services", services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["neutron"]}, - name="NeutronNetworks.create_and_show_routers", - platform="openstack") -class CreateAndShowRouters(utils.NeutronScenario): - - def run(self, network_create_args=None, subnet_create_args=None, - subnet_cidr_start=None, subnets_per_network=1, - router_create_args=None): - """Create and show a given number of routers. - - Create a network, a given number of subnets and routers - and then show all routers. 
- - :param network_create_args: dict, POST /v2.0/networks request - options - :param subnet_create_args: dict, POST /v2.0/subnets request options - :param subnet_cidr_start: str, start value for subnets CIDR - :param subnets_per_network: int, number of subnets for each network - :param router_create_args: dict, POST /v2.0/routers request options - """ - network, subnets, routers = self._create_network_structure( - network_create_args, subnet_create_args, subnet_cidr_start, - subnets_per_network, router_create_args) - - for router in routers: - self._show_router(router) - - -@validation.add("number", param_name="subnets_per_network", minval=1, - integer_only=True) -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@scenario.configure(context={"cleanup@openstack": ["neutron"]}, - name="NeutronNetworks.create_and_update_routers", - platform="openstack") -class CreateAndUpdateRouters(utils.NeutronScenario): - - def run(self, router_update_args, network_create_args=None, - subnet_create_args=None, subnet_cidr_start=None, - subnets_per_network=1, router_create_args=None): - """Create and update a given number of routers. - - Create a network, a given number of subnets and routers - and then updating all routers. - - :param router_update_args: dict, PUT /v2.0/routers update options - :param network_create_args: dict, POST /v2.0/networks request - options. Deprecated. - :param subnet_create_args: dict, POST /v2.0/subnets request options - :param subnet_cidr_start: str, start value for subnets CIDR - :param subnets_per_network: int, number of subnets for one network - :param router_create_args: dict, POST /v2.0/routers request options - """ - network, subnets, routers = self._create_network_structure( - network_create_args, subnet_create_args, subnet_cidr_start, - subnets_per_network, router_create_args) - - for router in routers: - self._update_router(router, router_update_args) - - -@validation.add("number", param_name="subnets_per_network", minval=1, - integer_only=True) -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@scenario.configure(context={"cleanup@openstack": ["neutron"]}, - name="NeutronNetworks.create_and_delete_routers", - platform="openstack") -class CreateAndDeleteRouters(utils.NeutronScenario): - - def run(self, network_create_args=None, subnet_create_args=None, - subnet_cidr_start=None, subnets_per_network=1, - router_create_args=None): - """Create and delete a given number of routers. - - Create a network, a given number of subnets and routers - and then delete all routers. - - :param network_create_args: dict, POST /v2.0/networks request - options. Deprecated. 
- :param subnet_create_args: dict, POST /v2.0/subnets request options - :param subnet_cidr_start: str, start value for subnets CIDR - :param subnets_per_network: int, number of subnets for one network - :param router_create_args: dict, POST /v2.0/routers request options - """ - network, subnets, routers = self._create_network_structure( - network_create_args, subnet_create_args, subnet_cidr_start, - subnets_per_network, router_create_args) - - for e in range(subnets_per_network): - router = routers[e] - subnet = subnets[e] - self._remove_interface_router(subnet["subnet"], router["router"]) - self._delete_router(router) - - -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["neutron"]}, - name="NeutronNetworks.set_and_clear_router_gateway", - platform="openstack") -class SetAndClearRouterGateway(utils.NeutronScenario): - - def run(self, enable_snat=True, network_create_args=None, - router_create_args=None): - """Set and Remove the external network gateway from a router. - - create an external network and a router, set external network - gateway for the router, remove the external network gateway from - the router. - - :param enable_snat: True if enable snat - :param network_create_args: dict, POST /v2.0/networks request - options - :param router_create_args: dict, POST /v2.0/routers request options - """ - network_create_args = network_create_args or {} - router_create_args = router_create_args or {} - ext_net = self._create_network(network_create_args) - router = self._create_router(router_create_args) - self._add_gateway_router(router, ext_net, enable_snat) - self._remove_gateway_router(router) - - -@validation.add("number", param_name="ports_per_network", minval=1, - integer_only=True) -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["neutron"]}, - name="NeutronNetworks.create_and_list_ports", - platform="openstack") -class CreateAndListPorts(utils.NeutronScenario): - - def run(self, network_create_args=None, - port_create_args=None, ports_per_network=1): - """Create and a given number of ports and list all ports. - - :param network_create_args: dict, POST /v2.0/networks request - options. Deprecated. - :param port_create_args: dict, POST /v2.0/ports request options - :param ports_per_network: int, number of ports for one network - """ - network = self._get_or_create_network(network_create_args) - for i in range(ports_per_network): - self._create_port(network, port_create_args or {}) - - self._list_ports() - - -@validation.add("number", param_name="ports_per_network", minval=1, - integer_only=True) -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["neutron"]}, - name="NeutronNetworks.create_and_update_ports", - platform="openstack") -class CreateAndUpdatePorts(utils.NeutronScenario): - - def run(self, port_update_args, network_create_args=None, - port_create_args=None, ports_per_network=1): - """Create and update a given number of ports. - - Measure the "neutron port-create" and "neutron port-update" commands - performance. 
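The router deletion loop a little earlier walks routers and subnets in step because each router's interface on its matching subnet must be removed before the router itself can be deleted. A tiny sketch of that ordering, with plain callables standing in for the Neutron calls, is:

def teardown_routers(subnets, routers, remove_interface, delete_router):
    """Detach each router from its subnet, then delete it, mirroring the loop above."""
    for subnet, router in zip(subnets, routers):
        remove_interface(subnet["subnet"], router["router"])
        delete_router(router)

calls = []
teardown_routers(
    [{"subnet": {"id": "s1"}}],
    [{"router": {"id": "r1"}}],
    remove_interface=lambda s, r: calls.append(("detach", s["id"], r["id"])),
    delete_router=lambda r: calls.append(("delete", r["router"]["id"])))
print(calls)  # [('detach', 's1', 'r1'), ('delete', 'r1')]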
- - :param port_update_args: dict, PUT /v2.0/ports update request options - :param network_create_args: dict, POST /v2.0/networks request - options. Deprecated. - :param port_create_args: dict, POST /v2.0/ports request options - :param ports_per_network: int, number of ports for one network - """ - network = self._get_or_create_network(network_create_args) - for i in range(ports_per_network): - port = self._create_port(network, port_create_args) - self._update_port(port, port_update_args) - - -@validation.add("number", param_name="ports_per_network", minval=1, - integer_only=True) -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["neutron"]}, - name="NeutronNetworks.create_and_show_ports", - platform="openstack") -class CreateAndShowPorts(utils.NeutronScenario): - - def run(self, network_create_args=None, - port_create_args=None, ports_per_network=1): - """Create a given number of ports and show created ports in trun. - - Measure the "neutron port-create" and "neutron port-show" commands - performance. - - :param network_create_args: dict, POST /v2.0/networks request - options. - :param port_create_args: dict, POST /v2.0/ports request options - :param ports_per_network: int, number of ports for one network - """ - network_create_args = network_create_args or {} - port_create_args = port_create_args or {} - - network = self._get_or_create_network(network_create_args) - for i in range(ports_per_network): - port = self._create_port(network, port_create_args) - msg = "Port isn't created" - self.assertTrue(port, err_msg=msg) - - port_info = self._show_port(port) - msg = "Created port and Showed port isn't equal" - self.assertEqual(port["port"]["id"], port_info["port"]["id"], - err_msg=msg) - - -@validation.add("number", param_name="ports_per_network", minval=1, - integer_only=True) -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@scenario.configure(context={"cleanup@openstack": ["neutron"]}, - name="NeutronNetworks.create_and_delete_ports", - platform="openstack") -class CreateAndDeletePorts(utils.NeutronScenario): - - def run(self, network_create_args=None, - port_create_args=None, ports_per_network=1): - """Create and delete a port. - - Measure the "neutron port-create" and "neutron port-delete" - commands performance. - - :param network_create_args: dict, POST /v2.0/networks request - options. Deprecated. - :param port_create_args: dict, POST /v2.0/ports request options - :param ports_per_network: int, number of ports for one network - """ - network = self._get_or_create_network(network_create_args) - for i in range(ports_per_network): - port = self._create_port(network, port_create_args) - self._delete_port(port) - - -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@validation.add("external_network_exists", param_name="floating_network") -@scenario.configure(context={"cleanup@openstack": ["neutron"]}, - name="NeutronNetworks.create_and_list_floating_ips", - platform="openstack") -class CreateAndListFloatingIps(utils.NeutronScenario): - - def run(self, floating_network=None, floating_ip_args=None): - """Create and list floating IPs. - - Measure the "neutron floating-ip-create" and "neutron floating-ip-list" - commands performance. 
- - :param floating_network: str, external network for floating IP creation - :param floating_ip_args: dict, POST /floatingips request options - """ - floating_ip_args = floating_ip_args or {} - self._create_floatingip(floating_network, **floating_ip_args) - self._list_floating_ips() - - -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@validation.add("external_network_exists", param_name="floating_network") -@scenario.configure(context={"cleanup@openstack": ["neutron"]}, - name="NeutronNetworks.create_and_delete_floating_ips", - platform="openstack") -class CreateAndDeleteFloatingIps(utils.NeutronScenario): - - def run(self, floating_network=None, floating_ip_args=None): - """Create and delete floating IPs. - - Measure the "neutron floating-ip-create" and "neutron - floating-ip-delete" commands performance. - - :param floating_network: str, external network for floating IP creation - :param floating_ip_args: dict, POST /floatingips request options - """ - floating_ip_args = floating_ip_args or {} - floating_ip = self._create_floatingip(floating_network, - **floating_ip_args) - self._delete_floating_ip(floating_ip["floatingip"]) - - -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="NeutronNetworks.list_agents", platform="openstack") -class ListAgents(utils.NeutronScenario): - - def run(self, agent_args=None): - """List all neutron agents. - - This simple scenario tests the "neutron agent-list" command by - listing all the neutron agents. - - :param agent_args: dict, POST /v2.0/agents request options - """ - agent_args = agent_args or {} - self._list_agents(**agent_args) - - -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_contexts", contexts=["network"]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["neutron"]}, - name="NeutronSubnets.delete_subnets", - platform="openstack") -class DeleteSubnets(utils.NeutronScenario): - - def run(self): - """Delete a subnet that belongs to each precreated network. - - Each runner instance picks a specific subnet from the list based on its - positional location in the list of users. By doing so, we can start - multiple threads with sufficient number of users created and spread - delete requests across all of them, so that they hit different subnets - concurrently. - - Concurrent execution of this scenario should help reveal any race - conditions and other concurrency issues in Neutron IP allocation layer, - among other things. - """ - tenant_id = self.context["tenant"]["id"] - users = self.context["tenants"][tenant_id]["users"] - number = users.index(self.context["user"]) - for network in self.context["tenants"][tenant_id]["networks"]: - # delete one of subnets based on the user sequential number - subnet_id = network["subnets"][number] - self._delete_subnet({"subnet": {"id": subnet_id}}) diff --git a/rally/plugins/openstack/scenarios/neutron/security_groups.py b/rally/plugins/openstack/scenarios/neutron/security_groups.py deleted file mode 100644 index 86f65d71e7..0000000000 --- a/rally/plugins/openstack/scenarios/neutron/security_groups.py +++ /dev/null @@ -1,236 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.neutron import utils -from rally.task import validation - - -"""Scenarios for Neutron Security Groups.""" - - -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure( - context={"cleanup@openstack": ["neutron"]}, - name="NeutronSecurityGroup.create_and_list_security_groups", - platform="openstack") -class CreateAndListSecurityGroups(utils.NeutronScenario): - - def run(self, security_group_create_args=None): - """Create and list Neutron security-groups. - - Measure the "neutron security-group-create" and "neutron - security-group-list" command performance. - - :param security_group_create_args: dict, POST /v2.0/security-groups - request options - """ - security_group_create_args = security_group_create_args or {} - self._create_security_group(**security_group_create_args) - self._list_security_groups() - - -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure( - context={"cleanup@openstack": ["neutron"]}, - name="NeutronSecurityGroup.create_and_show_security_group", - platform="openstack") -class CreateAndShowSecurityGroup(utils.NeutronScenario): - - def run(self, security_group_create_args=None): - """Create and show Neutron security-group. - - Measure the "neutron security-group-create" and "neutron - security-group-show" command performance. - - :param security_group_create_args: dict, POST /v2.0/security-groups - request options - """ - security_group_create_args = security_group_create_args or {} - security_group = self._create_security_group( - **security_group_create_args) - msg = "security_group isn't created" - self.assertTrue(security_group, err_msg=msg) - - self._show_security_group(security_group) - - -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure( - context={"cleanup@openstack": ["neutron"]}, - name="NeutronSecurityGroup.create_and_delete_security_groups", - platform="openstack") -class CreateAndDeleteSecurityGroups(utils.NeutronScenario): - - def run(self, security_group_create_args=None): - """Create and delete Neutron security-groups. - - Measure the "neutron security-group-create" and "neutron - security-group-delete" command performance. 
- - :param security_group_create_args: dict, POST /v2.0/security-groups - request options - """ - security_group_create_args = security_group_create_args or {} - security_group = self._create_security_group( - **security_group_create_args) - self._delete_security_group(security_group) - - -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure( - context={"cleanup@openstack": ["neutron"]}, - name="NeutronSecurityGroup.create_and_update_security_groups", - platform="openstack") -class CreateAndUpdateSecurityGroups(utils.NeutronScenario): - - def run(self, security_group_create_args=None, - security_group_update_args=None): - """Create and update Neutron security-groups. - - Measure the "neutron security-group-create" and "neutron - security-group-update" command performance. - - :param security_group_create_args: dict, POST /v2.0/security-groups - request options - :param security_group_update_args: dict, PUT /v2.0/security-groups - update options - """ - security_group_create_args = security_group_create_args or {} - security_group_update_args = security_group_update_args or {} - security_group = self._create_security_group( - **security_group_create_args) - self._update_security_group(security_group, - **security_group_update_args) - - -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure( - context={"cleanup@openstack": ["neutron"]}, - name="NeutronSecurityGroup.create_and_list_security_group_rules", - platform="openstack") -class CreateAndListSecurityGroupRules(utils.NeutronScenario): - - def run(self, security_group_args=None, - security_group_rule_args=None): - """Create and list Neutron security-group-rules. - - Measure the "neutron security-group-rule-create" and "neutron - security-group-rule-list" command performance. - - :param security_group_args: dict, POST /v2.0/security-groups - request options - :param security_group_rule_args: dict, - POST /v2.0/security-group-rules request options - """ - security_group_args = security_group_args or {} - security_group_rule_args = security_group_rule_args or {} - - security_group = self._create_security_group(**security_group_args) - msg = "security_group isn't created" - self.assertTrue(security_group, err_msg=msg) - - security_group_rule = self._create_security_group_rule( - security_group["security_group"]["id"], **security_group_rule_args) - msg = "security_group_rule isn't created" - self.assertTrue(security_group_rule, err_msg=msg) - - security_group_rules = self._list_security_group_rules() - self.assertIn(security_group_rule["security_group_rule"]["id"], - [sgr["id"] for sgr - in security_group_rules["security_group_rules"]]) - - -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure( - context={"cleanup@openstack": ["neutron"]}, - name="NeutronSecurityGroup.create_and_show_security_group_rule", - platform="openstack") -class CreateAndShowSecurityGroupRule(utils.NeutronScenario): - - def run(self, security_group_args=None, - security_group_rule_args=None): - """Create and show Neutron security-group-rule. - - Measure the "neutron security-group-rule-create" and "neutron - security-group-rule-show" command performance. 
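The rule-listing scenario above checks its own result: after creating a rule it lists all rules and asserts that the new id is present. Stripped of the Rally plumbing, that verification amounts to roughly the following; the dict shapes mirror the Neutron responses handled above:

def assert_rule_listed(created_rule, listing):
    """Fail loudly if the freshly created rule is missing from the listing."""
    rule_id = created_rule["security_group_rule"]["id"]
    listed_ids = [r["id"] for r in listing["security_group_rules"]]
    if rule_id not in listed_ids:
        raise AssertionError("rule %s missing from listing: %s" % (rule_id, listed_ids))

created = {"security_group_rule": {"id": "abc"}}
listing = {"security_group_rules": [{"id": "abc"}, {"id": "def"}]}
assert_rule_listed(created, listing)  # passes silently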
- - :param security_group_args: dict, POST /v2.0/security-groups - request options - :param security_group_rule_args: dict, - POST /v2.0/security-group-rules request options - """ - security_group_args = security_group_args or {} - security_group_rule_args = security_group_rule_args or {} - - security_group = self._create_security_group(**security_group_args) - msg = "security_group isn't created" - self.assertTrue(security_group, err_msg=msg) - - security_group_rule = self._create_security_group_rule( - security_group["security_group"]["id"], **security_group_rule_args) - msg = "security_group_rule isn't created" - self.assertTrue(security_group_rule, err_msg=msg) - - self._show_security_group_rule( - security_group_rule["security_group_rule"]["id"]) - - -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure( - context={"cleanup@openstack": ["neutron"]}, - name="NeutronSecurityGroup.create_and_delete_security_group_rule", - platform="openstack") -class CreateAndDeleteSecurityGroupRule(utils.NeutronScenario): - - def run(self, security_group_args=None, - security_group_rule_args=None): - """Create and delete Neutron security-group-rule. - - Measure the "neutron security-group-rule-create" and "neutron - security-group-rule-delete" command performance. - - :param security_group_args: dict, POST /v2.0/security-groups - request options - :param security_group_rule_args: dict, - POST /v2.0/security-group-rules request options - """ - security_group_args = security_group_args or {} - security_group_rule_args = security_group_rule_args or {} - - security_group = self._create_security_group(**security_group_args) - msg = "security_group isn't created" - self.assertTrue(security_group, err_msg=msg) - - security_group_rule = self._create_security_group_rule( - security_group["security_group"]["id"], **security_group_rule_args) - msg = "security_group_rule isn't created" - self.assertTrue(security_group_rule, err_msg=msg) - - self._delete_security_group_rule( - security_group_rule["security_group_rule"]["id"]) - self._delete_security_group(security_group) diff --git a/rally/plugins/openstack/scenarios/neutron/utils.py b/rally/plugins/openstack/scenarios/neutron/utils.py deleted file mode 100644 index e0532e2f9d..0000000000 --- a/rally/plugins/openstack/scenarios/neutron/utils.py +++ /dev/null @@ -1,878 +0,0 @@ -# Copyright 2014: Intel Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import random - -from rally.common import cfg -from rally.common import logging -from rally import exceptions -from rally.plugins.openstack import scenario -from rally.plugins.openstack.wrappers import network as network_wrapper -from rally.task import atomic -from rally.task import utils - - -CONF = cfg.CONF - - -LOG = logging.getLogger(__name__) - - -class NeutronScenario(scenario.OpenStackScenario): - """Base class for Neutron scenarios with basic atomic actions.""" - - SUBNET_IP_VERSION = 4 - # TODO(rkiran): modify in case LBaaS-v2 requires - LB_METHOD = "ROUND_ROBIN" - LB_PROTOCOL = "HTTP" - LB_PROTOCOL_PORT = 80 - HM_TYPE = "PING" - HM_MAX_RETRIES = 3 - HM_DELAY = 20 - HM_TIMEOUT = 10 - - def _get_network_id(self, network, **kwargs): - """Get Neutron network ID for the network name. - - param network: str, network name/id - param kwargs: dict, network options - returns: str, Neutron network-id - """ - networks = self._list_networks() - for net in networks: - if (net["name"] == network) or (net["id"] == network): - return net["id"] - raise exceptions.NotFoundException( - message="Network %s not found." % network) - - @property - def _ext_gw_mode_enabled(self): - """Determine if the ext-gw-mode extension is enabled. - - Without this extension, we can't pass the enable_snat parameter. - """ - return any( - e["alias"] == "ext-gw-mode" - for e in self.clients("neutron").list_extensions()["extensions"]) - - @atomic.action_timer("neutron.create_network") - def _create_network(self, network_create_args): - """Create neutron network. - - :param network_create_args: dict, POST /v2.0/networks request options - :returns: neutron network dict - """ - network_create_args["name"] = self.generate_random_name() - return self.clients("neutron").create_network( - {"network": network_create_args}) - - @atomic.action_timer("neutron.list_networks") - def _list_networks(self, **kwargs): - """Return user networks list. - - :param kwargs: network list options - """ - return self.clients("neutron").list_networks(**kwargs)["networks"] - - @atomic.action_timer("neutron.list_agents") - def _list_agents(self, **kwargs): - """Fetches agents. - - :param kwargs: neutron agent list options - :returns: user agents list - """ - return self.clients("neutron").list_agents(**kwargs)["agents"] - - @atomic.action_timer("neutron.update_network") - def _update_network(self, network, network_update_args): - """Update the network. - - This atomic function updates the network with network_update_args. - - :param network: Network object - :param network_update_args: dict, POST /v2.0/networks update options - :returns: updated neutron network dict - """ - network_update_args["name"] = self.generate_random_name() - body = {"network": network_update_args} - return self.clients("neutron").update_network( - network["network"]["id"], body) - - @atomic.action_timer("neutron.show_network") - def _show_network(self, network, **kwargs): - """show network details. - - :param network: Network object - :param kwargs: dict, POST /v2.0/networks show options - :returns: details of the network - """ - return self.clients("neutron").show_network( - network["network"]["id"], **kwargs) - - @atomic.action_timer("neutron.delete_network") - def _delete_network(self, network): - """Delete neutron network. - - :param network: Network object - """ - self.clients("neutron").delete_network(network["id"]) - - @atomic.action_timer("neutron.create_subnet") - def _create_subnet(self, network, subnet_create_args, start_cidr=None): - """Create neutron subnet. 
- - :param network: neutron network dict - :param subnet_create_args: POST /v2.0/subnets request options - :returns: neutron subnet dict - """ - network_id = network["network"]["id"] - - if not subnet_create_args.get("cidr"): - start_cidr = start_cidr or "10.2.0.0/24" - subnet_create_args["cidr"] = ( - network_wrapper.generate_cidr(start_cidr=start_cidr)) - - subnet_create_args["network_id"] = network_id - subnet_create_args["name"] = self.generate_random_name() - subnet_create_args.setdefault("ip_version", self.SUBNET_IP_VERSION) - - return self.clients("neutron").create_subnet( - {"subnet": subnet_create_args}) - - @atomic.action_timer("neutron.list_subnets") - def _list_subnets(self): - """Returns user subnetworks list.""" - return self.clients("neutron").list_subnets()["subnets"] - - @atomic.action_timer("neutron.show_subnet") - def _show_subnet(self, subnet, **kwargs): - """show subnet details. - - :param: subnet: Subnet object - :param: kwargs: Optional additional arguments for subnet show - :returns: details of the subnet - """ - return self.clients("neutron").show_subnet(subnet["subnet"]["id"], - **kwargs) - - @atomic.action_timer("neutron.update_subnet") - def _update_subnet(self, subnet, subnet_update_args): - """Update the neutron subnet. - - This atomic function updates the subnet with subnet_update_args. - - :param subnet: Subnet object - :param subnet_update_args: dict, PUT /v2.0/subnets update options - :returns: updated neutron subnet dict - """ - subnet_update_args["name"] = self.generate_random_name() - body = {"subnet": subnet_update_args} - return self.clients("neutron").update_subnet( - subnet["subnet"]["id"], body) - - @atomic.action_timer("neutron.delete_subnet") - def _delete_subnet(self, subnet): - """Delete neutron subnet - - :param subnet: Subnet object - """ - self.clients("neutron").delete_subnet(subnet["subnet"]["id"]) - - @atomic.action_timer("neutron.create_router") - def _create_router(self, router_create_args, external_gw=False): - """Create neutron router. - - :param router_create_args: POST /v2.0/routers request options - :returns: neutron router dict - """ - router_create_args["name"] = self.generate_random_name() - - if external_gw: - for network in self._list_networks(): - if network.get("router:external"): - external_network = network - gw_info = {"network_id": external_network["id"]} - if self._ext_gw_mode_enabled: - gw_info["enable_snat"] = True - router_create_args.setdefault("external_gateway_info", - gw_info) - - return self.clients("neutron").create_router( - {"router": router_create_args}) - - @atomic.action_timer("neutron.list_routers") - def _list_routers(self): - """Returns user routers list.""" - return self.clients("neutron").list_routers()["routers"] - - @atomic.action_timer("neutron.show_router") - def _show_router(self, router, **kwargs): - """Show information of a given router. - - :param router: ID or name of router to look up - :kwargs: dict, POST /v2.0/routers show options - :return: details of the router - """ - return self.clients("neutron").show_router( - router["router"]["id"], **kwargs) - - @atomic.action_timer("neutron.delete_router") - def _delete_router(self, router): - """Delete neutron router - - :param router: Router object - """ - self.clients("neutron").delete_router(router["router"]["id"]) - - @atomic.action_timer("neutron.update_router") - def _update_router(self, router, router_update_args): - """Update the neutron router. - - This atomic function updates the router with router_update_args. 
- - :param router: dict, neutron router - :param router_update_args: dict, PUT /v2.0/routers update options - :returns: updated neutron router dict - """ - router_update_args["name"] = self.generate_random_name() - body = {"router": router_update_args} - return self.clients("neutron").update_router( - router["router"]["id"], body) - - @atomic.action_timer("neutron.create_port") - def _create_port(self, network, port_create_args): - """Create neutron port. - - :param network: neutron network dict - :param port_create_args: POST /v2.0/ports request options - :returns: neutron port dict - """ - port_create_args["network_id"] = network["network"]["id"] - port_create_args["name"] = self.generate_random_name() - return self.clients("neutron").create_port({"port": port_create_args}) - - @atomic.action_timer("neutron.list_ports") - def _list_ports(self): - """Return user ports list.""" - return self.clients("neutron").list_ports()["ports"] - - @atomic.action_timer("neutron.show_port") - def _show_port(self, port, **params): - """Return user port details. - - :param port: dict, neutron port - :param params: neutron port show options - :returns: neutron port dict - """ - return self.clients("neutron").show_port(port["port"]["id"], **params) - - @atomic.action_timer("neutron.update_port") - def _update_port(self, port, port_update_args): - """Update the neutron port. - - This atomic function updates port with port_update_args. - - :param port: dict, neutron port - :param port_update_args: dict, PUT /v2.0/ports update options - :returns: updated neutron port dict - """ - port_update_args["name"] = self.generate_random_name() - body = {"port": port_update_args} - return self.clients("neutron").update_port(port["port"]["id"], body) - - @atomic.action_timer("neutron.delete_port") - def _delete_port(self, port): - """Delete neutron port. - - :param port: Port object - """ - self.clients("neutron").delete_port(port["port"]["id"]) - - @logging.log_deprecated_args( - "network_create_args is deprecated; use the network context instead", - "0.1.0", "network_create_args") - def _get_or_create_network(self, network_create_args=None): - """Get a network from context, or create a new one. - - This lets users either create networks with the 'network' - context, provide existing networks with the 'existing_network' - context, or let the scenario create a default network for - them. Running this without one of the network contexts is - deprecated. - - :param network_create_args: Deprecated way to provide network - creation args; use the network - context instead. - :returns: Network dict - """ - if "networks" in self.context["tenant"]: - return {"network": - random.choice(self.context["tenant"]["networks"])} - else: - LOG.warning("Running this scenario without either the 'network' " - "or 'existing_network' context is deprecated") - return self._create_network(network_create_args or {}) - - def _create_subnets(self, network, - subnet_create_args=None, - subnet_cidr_start=None, - subnets_per_network=1): - """Create new subnets in the given network. 
- - :param network: network to create subnets in - :param subnet_create_args: dict, POST /v2.0/subnets request options - :param subnet_cidr_start: str, start value for subnets CIDR - :param subnets_per_network: int, number of subnets for one network - :returns: List of subnet dicts - """ - return [self._create_subnet(network, subnet_create_args or {}, - subnet_cidr_start) - for i in range(subnets_per_network)] - - def _create_network_and_subnets(self, - network_create_args=None, - subnet_create_args=None, - subnets_per_network=1, - subnet_cidr_start="1.0.0.0/24"): - """Create network and subnets. - - :parm network_create_args: dict, POST /v2.0/networks request options - :parm subnet_create_args: dict, POST /v2.0/subnets request options - :parm subnets_per_network: int, number of subnets for one network - :parm subnet_cidr_start: str, start value for subnets CIDR - :returns: tuple of result network and subnets list - """ - network = self._create_network(network_create_args or {}) - subnets = self._create_subnets(network, subnet_create_args, - subnet_cidr_start, subnets_per_network) - return network, subnets - - def _create_network_structure(self, network_create_args=None, - subnet_create_args=None, - subnet_cidr_start=None, - subnets_per_network=None, - router_create_args=None): - """Create a network and a given number of subnets and routers. - - :param network_create_args: dict, POST /v2.0/networks request options - :param subnet_create_args: dict, POST /v2.0/subnets request options - :param subnet_cidr_start: str, start value for subnets CIDR - :param subnets_per_network: int, number of subnets for one network - :param router_create_args: dict, POST /v2.0/routers request options - :returns: tuple of (network, subnets, routers) - """ - network = self._create_network(network_create_args or {}) - subnets = self._create_subnets(network, subnet_create_args, - subnet_cidr_start, - subnets_per_network) - - routers = [] - for subnet in subnets: - router = self._create_router(router_create_args or {}) - self._add_interface_router(subnet["subnet"], - router["router"]) - routers.append(router) - - return (network, subnets, routers) - - @atomic.action_timer("neutron.add_interface_router") - def _add_interface_router(self, subnet, router): - """Connect subnet to router. - - :param subnet: dict, neutron subnet - :param router: dict, neutron router - """ - self.clients("neutron").add_interface_router( - router["id"], {"subnet_id": subnet["id"]}) - - @atomic.action_timer("neutron.remove_interface_router") - def _remove_interface_router(self, subnet, router): - """Remove subnet from router - - :param subnet: dict, neutron subnet - :param router: dict, neutron router - """ - self.clients("neutron").remove_interface_router( - router["id"], {"subnet_id": subnet["id"]}) - - @atomic.action_timer("neutron.add_gateway_router") - def _add_gateway_router(self, router, ext_net, enable_snat): - """Set the external network gateway for a router. - - :param router: dict, neutron router - :param ext_net: external network for the gateway - :param enable_snat: True if enable snat - """ - gw_info = {"network_id": ext_net["network"]["id"]} - if self._ext_gw_mode_enabled: - gw_info["enable_snat"] = enable_snat - self.clients("neutron").add_gateway_router( - router["router"]["id"], gw_info) - - @atomic.action_timer("neutron.remove_gateway_router") - def _remove_gateway_router(self, router): - """Removes an external network gateway from the specified router. 
- - :param router: dict, neutron router - """ - self.clients("neutron").remove_gateway_router( - router["router"]["id"]) - - @atomic.action_timer("neutron.create_pool") - def _create_lb_pool(self, subnet_id, **pool_create_args): - """Create LB pool(v1) - - :param subnet_id: str, neutron subnet-id - :param pool_create_args: dict, POST /lb/pools request options - :returns: dict, neutron lb pool - """ - args = {"lb_method": self.LB_METHOD, - "protocol": self.LB_PROTOCOL, - "name": self.generate_random_name(), - "subnet_id": subnet_id} - args.update(pool_create_args) - return self.clients("neutron").create_pool({"pool": args}) - - def _create_v1_pools(self, networks, **pool_create_args): - """Create LB pools(v1) - - :param networks: list, neutron networks - :param pool_create_args: dict, POST /lb/pools request options - :returns: list, neutron lb pools - """ - subnets = [] - pools = [] - for net in networks: - subnets.extend(net.get("subnets", [])) - for subnet_id in subnets: - pools.append(self._create_lb_pool( - subnet_id, **pool_create_args)) - return pools - - @atomic.action_timer("neutron.list_pools") - def _list_v1_pools(self, **kwargs): - """Return user lb pool list(v1).""" - return self.clients("neutron").list_pools(**kwargs) - - @atomic.action_timer("neutron.delete_pool") - def _delete_v1_pool(self, pool): - """Delete neutron pool. - - :param pool: Pool object - """ - self.clients("neutron").delete_pool(pool["id"]) - - @atomic.action_timer("neutron.update_pool") - def _update_v1_pool(self, pool, **pool_update_args): - """Update pool. - - This atomic function updates the pool with pool_update_args. - - :param pool: Pool object - :param pool_update_args: dict, POST /lb/pools update options - :returns: updated neutron pool dict - """ - pool_update_args["name"] = self.generate_random_name() - body = {"pool": pool_update_args} - return self.clients("neutron").update_pool(pool["pool"]["id"], body) - - def _create_v1_vip(self, pool, **vip_create_args): - """Create VIP(v1) - - :parm pool: dict, neutron lb-pool - :parm vip_create_args: dict, POST /lb/vips request options - :returns: dict, neutron lb vip - """ - args = {"protocol": self.LB_PROTOCOL, - "protocol_port": self.LB_PROTOCOL_PORT, - "name": self.generate_random_name(), - "pool_id": pool["pool"]["id"], - "subnet_id": pool["pool"]["subnet_id"]} - args.update(vip_create_args) - return self.clients("neutron").create_vip({"vip": args}) - - @atomic.action_timer("neutron.list_vips") - def _list_v1_vips(self, **kwargs): - """Return user lb vip list(v1).""" - return self.clients("neutron").list_vips(**kwargs) - - @atomic.action_timer("neutron.delete_vip") - def _delete_v1_vip(self, vip): - """Delete neutron vip. - - :param vip: neutron Virtual IP object - """ - self.clients("neutron").delete_vip(vip["id"]) - - @atomic.action_timer("neutron.update_vip") - def _update_v1_vip(self, vip, **vip_update_args): - """Updates vip. - - This atomic function updates vip name and admin state - - :param vip: Vip object - :param vip_update_args: dict, POST /lb/vips update options - :returns: updated neutron vip dict - """ - vip_update_args["name"] = self.generate_random_name() - body = {"vip": vip_update_args} - return self.clients("neutron").update_vip(vip["vip"]["id"], body) - - @atomic.action_timer("neutron.create_floating_ip") - def _create_floatingip(self, floating_network, **floating_ip_args): - """Create floating IP with floating_network. 
- - param: floating_network: str, external network to create floating IP - param: floating_ip_args: dict, POST /floatingips create options - returns: dict, neutron floating IP - """ - from neutronclient.common import exceptions as ne - floating_network_id = self._get_network_id( - floating_network) - args = {"floating_network_id": floating_network_id} - - if not CONF.openstack.pre_newton_neutron: - args["description"] = self.generate_random_name() - args.update(floating_ip_args) - try: - return self.clients("neutron").create_floatingip( - {"floatingip": args}) - except ne.BadRequest as e: - error = "%s" % e - if "Unrecognized attribute" in error and "'description'" in error: - LOG.info("It looks like you have Neutron API of pre-Newton " - "OpenStack release. Setting " - "openstack.pre_newton_neutron option via Rally " - "configuration should fix an issue.") - raise - - @atomic.action_timer("neutron.list_floating_ips") - def _list_floating_ips(self, **kwargs): - """Return floating IPs list.""" - return self.clients("neutron").list_floatingips(**kwargs) - - @atomic.action_timer("neutron.delete_floating_ip") - def _delete_floating_ip(self, floating_ip): - """Delete floating IP. - - :param: dict, floating IP object - """ - return self.clients("neutron").delete_floatingip(floating_ip["id"]) - - @atomic.action_timer("neutron.create_healthmonitor") - def _create_v1_healthmonitor(self, **healthmonitor_create_args): - """Create LB healthmonitor. - - This atomic function creates healthmonitor with the provided - healthmonitor_create_args. - - :param healthmonitor_create_args: dict, POST /lb/healthmonitors - :returns: neutron healthmonitor dict - """ - args = {"type": self.HM_TYPE, - "delay": self.HM_DELAY, - "max_retries": self.HM_MAX_RETRIES, - "timeout": self.HM_TIMEOUT} - args.update(healthmonitor_create_args) - return self.clients("neutron").create_health_monitor( - {"health_monitor": args}) - - @atomic.action_timer("neutron.list_healthmonitors") - def _list_v1_healthmonitors(self, **kwargs): - """List LB healthmonitors. - - This atomic function lists all helthmonitors. - - :param kwargs: optional parameters - :returns: neutron lb healthmonitor list - """ - return self.clients("neutron").list_health_monitors(**kwargs) - - @atomic.action_timer("neutron.delete_healthmonitor") - def _delete_v1_healthmonitor(self, healthmonitor): - """Delete neutron healthmonitor. - - :param healthmonitor: neutron healthmonitor dict - """ - self.clients("neutron").delete_health_monitor(healthmonitor["id"]) - - @atomic.action_timer("neutron.update_healthmonitor") - def _update_v1_healthmonitor(self, healthmonitor, - **healthmonitor_update_args): - """Update neutron healthmonitor. - - :param healthmonitor: neutron lb healthmonitor dict - :param healthmonitor_update_args: POST /lb/healthmonitors - update options - :returns: updated neutron lb healthmonitor dict - """ - body = {"health_monitor": healthmonitor_update_args} - return self.clients("neutron").update_health_monitor( - healthmonitor["health_monitor"]["id"], body) - - @atomic.action_timer("neutron.create_security_group") - def _create_security_group(self, **security_group_create_args): - """Create Neutron security-group. 
- - param: security_group_create_args: dict, POST /v2.0/security-groups - request options - return: dict, neutron security-group - """ - security_group_create_args["name"] = self.generate_random_name() - return self.clients("neutron").create_security_group( - {"security_group": security_group_create_args}) - - @atomic.action_timer("neutron.delete_security_group") - def _delete_security_group(self, security_group): - """Delete Neutron security group. - - param: security_group: dict, neutron security_group - """ - return self.clients("neutron").delete_security_group( - security_group["security_group"]["id"]) - - @atomic.action_timer("neutron.list_security_groups") - def _list_security_groups(self, **kwargs): - """Return list of Neutron security groups.""" - return self.clients("neutron").list_security_groups(**kwargs) - - @atomic.action_timer("neutron.show_security_group") - def _show_security_group(self, security_group, **kwargs): - """Show security group details. - - :param: security_group: dict, neutron security_group - :param: kwargs: Optional additional arguments for security_group show - :returns: security_group details - """ - return self.clients("neutron").show_security_group( - security_group["security_group"]["id"], **kwargs) - - @atomic.action_timer("neutron.update_security_group") - def _update_security_group(self, security_group, - **security_group_update_args): - """Update Neutron security-group. - - param: security_group: dict, neutron security_group - param: security_group_update_args: dict, POST /v2.0/security-groups - update options - return: dict, updated neutron security-group - """ - security_group_update_args["name"] = self.generate_random_name() - body = {"security_group": security_group_update_args} - return self.clients("neutron").update_security_group( - security_group["security_group"]["id"], body) - - def update_loadbalancer_resource(self, lb): - try: - new_lb = self.clients("neutron").show_loadbalancer(lb["id"]) - except Exception as e: - if getattr(e, "status_code", 400) == 404: - raise exceptions.GetResourceNotFound(resource=lb) - raise exceptions.GetResourceFailure(resource=lb, err=e) - return new_lb["loadbalancer"] - - @atomic.action_timer("neutron.create_lbaasv2_loadbalancer") - def _create_lbaasv2_loadbalancer(self, subnet_id, **lb_create_args): - """Create LB loadbalancer(v2) - - :param subnet_id: str, neutron subnet-id - :param lb_create_args: dict, POST /lbaas/loadbalancers request options - :returns: dict, neutron lb - """ - args = {"name": self.generate_random_name(), - "vip_subnet_id": subnet_id} - args.update(lb_create_args) - neutronclient = self.clients("neutron") - lb = neutronclient.create_loadbalancer({"loadbalancer": args}) - lb = lb["loadbalancer"] - lb = utils.wait_for_status( - lb, - ready_statuses=["ACTIVE"], - status_attr="provisioning_status", - update_resource=self.update_loadbalancer_resource, - timeout=CONF.openstack.neutron_create_loadbalancer_timeout, - check_interval=( - CONF.openstack.neutron_create_loadbalancer_poll_interval) - ) - return lb - - @atomic.action_timer("neutron.list_lbaasv2_loadbalancers") - def _list_lbaasv2_loadbalancers(self, retrieve_all=True, **lb_list_args): - """List LB loadbalancers(v2) - - :param lb_list_args: dict, POST /lbaas/loadbalancers request options - :returns: dict, neutron lb loadbalancers(v2) - """ - return self.clients("neutron").list_loadbalancers(retrieve_all, - **lb_list_args) - - @atomic.action_timer("neutron.create_bgpvpn") - def _create_bgpvpn(self, **kwargs): - """Create Bgpvpn resource 
(POST /bgpvpn/bgpvpn) - - :param kwargs: optional parameters to create BGP VPN - :returns dict, bgpvpn resource details - """ - kwargs["name"] = self.generate_random_name() - return self.admin_clients("neutron").create_bgpvpn({"bgpvpn": kwargs}) - - @atomic.action_timer("neutron.delete_bgpvpn") - def _delete_bgpvpn(self, bgpvpn): - """Delete Bgpvpn resource.(DELETE /bgpvpn/bgpvpns/{id}) - - :param bgpvpn: dict, bgpvpn - :return dict, bgpvpn - """ - return self.admin_clients("neutron").delete_bgpvpn( - bgpvpn["bgpvpn"]["id"]) - - @atomic.action_timer("neutron.list_bgpvpns") - def _list_bgpvpns(self, **kwargs): - """Return bgpvpns list. - - :param kwargs: dict, GET /bgpvpn/bgpvpns request options - :returns: bgpvpns list - """ - return self.admin_clients("neutron").list_bgpvpns( - True, **kwargs)["bgpvpns"] - - @atomic.action_timer("neutron.update_bgpvpn") - def _update_bgpvpn(self, bgpvpn, update_name=False, **kwargs): - """Update a bgpvpn. - - :param bgpvpn: dict, bgpvpn - :param update_name: update_name: bool, whether or not to modify - BGP VPN name - :param **kwargs: dict, PUT /bgpvpn/bgpvpns update options - :return dict, updated bgpvpn - """ - if update_name or "name" in kwargs: - kwargs["name"] = self.generate_random_name() - return self.admin_clients("neutron").update_bgpvpn( - bgpvpn["bgpvpn"]["id"], {"bgpvpn": kwargs}) - - @atomic.action_timer("neutron.create_bgpvpn_network_assoc") - def _create_bgpvpn_network_assoc(self, bgpvpn, network): - """Creates a new BGP VPN network association. - - :param bgpvpn: dict, bgpvpn - :param network: dict, network - :return dict: network_association - """ - netassoc = {"network_id": network["id"]} - return self.clients("neutron").create_bgpvpn_network_assoc( - bgpvpn["bgpvpn"]["id"], {"network_association": netassoc}) - - @atomic.action_timer("neutron.delete_bgpvpn_network_assoc") - def _delete_bgpvpn_network_assoc(self, bgpvpn, net_assoc): - """Delete the specified BGP VPN network association - - :param bgpvpn: dict, bgpvpn - :param net_assoc: dict, network - :return dict: network_association - """ - return self.clients("neutron").delete_bgpvpn_network_assoc( - bgpvpn["bgpvpn"]["id"], net_assoc["network_association"]["id"]) - - @atomic.action_timer("neutron.create_bgpvpn_router_assoc") - def _create_bgpvpn_router_assoc(self, bgpvpn, router): - """Creates a new BGP VPN router association. 
- - :param bgpvpn: dict, bgpvpn - :param router: dict, router - :return dict: network_association - """ - router_assoc = {"router_id": router["id"]} - return self.clients("neutron").create_bgpvpn_router_assoc( - bgpvpn["bgpvpn"]["id"], {"router_association": router_assoc}) - - @atomic.action_timer("neutron.delete_bgpvpn_router_assoc") - def _delete_bgpvpn_router_assoc(self, bgpvpn, router_assoc): - """Delete the specified BGP VPN router association - - :param bgpvpn: dict, bgpvpn - :param router_assoc: dict, router - :return dict: router_association - """ - return self.clients("neutron").delete_bgpvpn_router_assoc( - bgpvpn["bgpvpn"]["id"], router_assoc["router_association"]["id"]) - - @atomic.action_timer("neutron.list_bgpvpn_network_assocs") - def _list_bgpvpn_network_assocs(self, bgpvpn, **kwargs): - """List network association of bgpvpn - - :param bgpvpn: dict, bgpvpn - :param **kwargs: dict, optional parameters - :return dict: network_association - """ - return self.clients("neutron").list_bgpvpn_network_assocs( - bgpvpn["bgpvpn"]["id"], **kwargs) - - @atomic.action_timer("neutron.list_bgpvpn_router_assocs") - def _list_bgpvpn_router_assocs(self, bgpvpn, **kwargs): - """List router association of bgpvpn - - :param bgpvpn: dict, bgpvpn - :param **kwargs: dict, optional parameters - :return dict: router_association - """ - return self.clients("neutron").list_bgpvpn_router_assocs( - bgpvpn["bgpvpn"]["id"], **kwargs) - - @atomic.action_timer("neutron.create_security_group_rule") - def _create_security_group_rule(self, security_group_id, - **security_group_rule_args): - """Create Neutron security-group-rule. - - param: security_group_id: id of neutron security_group - param: security_group_rule_args: dict, POST - /v2.0/security-group-rules request options - return: dict, neutron security-group-rule - """ - security_group_rule_args["security_group_id"] = security_group_id - if "direction" not in security_group_rule_args: - security_group_rule_args["direction"] = "ingress" - - return self.clients("neutron").create_security_group_rule( - {"security_group_rule": security_group_rule_args}) - - @atomic.action_timer("neutron.list_security_group_rules") - def _list_security_group_rules(self, **kwargs): - """List all security group rules. - - :param kwargs: Optional additional arguments for roles list - :return: list of security group rules - """ - return self.clients("neutron").list_security_group_rules(**kwargs) - - @atomic.action_timer("neutron.show_security_group_rule") - def _show_security_group_rule(self, security_group_rule, **kwargs): - """Show information of a given security group rule. - - :param security_group_rule: id of security group rule - :param kwargs: Optional additional arguments for roles list - :return: details of security group rule - """ - return self.clients("neutron").show_security_group_rule( - security_group_rule, **kwargs) - - @atomic.action_timer("neutron.delete_security_group_rule") - def _delete_security_group_rule(self, security_group_rule): - """Delete a given security group rule. 
- - :param security_group_rule: id of security group rule - """ - self.clients("neutron").delete_security_group_rule( - security_group_rule) diff --git a/rally/plugins/openstack/scenarios/nova/__init__.py b/rally/plugins/openstack/scenarios/nova/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/scenarios/nova/agents.py b/rally/plugins/openstack/scenarios/nova/agents.py deleted file mode 100644 index 25a9c71dd5..0000000000 --- a/rally/plugins/openstack/scenarios/nova/agents.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2016 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.nova import utils -from rally.task import validation - - -"""Scenarios for Nova agents.""" - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(name="NovaAgents.list_agents", platform="openstack") -class ListAgents(utils.NovaScenario): - def run(self, hypervisor=None): - """List all builds. - - Measure the "nova agent-list" command performance. - - :param hypervisor: List agent builds on a specific hypervisor. - None (default value) means list for all - hypervisors - """ - self._list_agents(hypervisor) diff --git a/rally/plugins/openstack/scenarios/nova/aggregates.py b/rally/plugins/openstack/scenarios/nova/aggregates.py deleted file mode 100644 index 4c9cee83f9..0000000000 --- a/rally/plugins/openstack/scenarios/nova/aggregates.py +++ /dev/null @@ -1,191 +0,0 @@ -# Copyright 2016 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally import exceptions -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.nova import utils -from rally.task import types -from rally.task import validation - - -"""Scenarios for Nova aggregates.""" - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(name="NovaAggregates.list_aggregates", - platform="openstack") -class ListAggregates(utils.NovaScenario): - - def run(self): - """List all nova aggregates. - - Measure the "nova aggregate-list" command performance. 
- """ - self._list_aggregates() - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["nova"]}, - name="NovaAggregates.create_and_list_aggregates", - platform="openstack") -class CreateAndListAggregates(utils.NovaScenario): - """scenario for create and list aggregate.""" - - def run(self, availability_zone): - """Create a aggregate and then list all aggregates. - - This scenario creates a aggregate and then lists all aggregates. - :param availability_zone: The availability zone of the aggregate - """ - aggregate = self._create_aggregate(availability_zone) - msg = "Aggregate isn't created" - self.assertTrue(aggregate, err_msg=msg) - all_aggregates = self._list_aggregates() - msg = ("Created aggregate is not in the" - " list of all available aggregates") - self.assertIn(aggregate, all_aggregates, err_msg=msg) - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["nova"]}, - name="NovaAggregates.create_and_delete_aggregate", - platform="openstack") -class CreateAndDeleteAggregate(utils.NovaScenario): - """Scenario for create and delete aggregate.""" - - def run(self, availability_zone): - """Create an aggregate and then delete it. - - This scenario first creates an aggregate and then delete it. - :param availability_zone: The availability zone of the aggregate - """ - aggregate = self._create_aggregate(availability_zone) - self._delete_aggregate(aggregate) - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["nova"]}, - name="NovaAggregates.create_and_update_aggregate", - platform="openstack") -class CreateAndUpdateAggregate(utils.NovaScenario): - """Scenario for create and update aggregate.""" - - def run(self, availability_zone): - """Create an aggregate and then update its name and availability_zone - - This scenario first creates an aggregate and then update its name and - availability_zone - :param availability_zone: The availability zone of the aggregate - """ - aggregate = self._create_aggregate(availability_zone) - self._update_aggregate(aggregate) - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["nova"]}, - name="NovaAggregates.create_aggregate_add_and_remove_host", - platform="openstack") -class CreateAggregateAddAndRemoveHost(utils.NovaScenario): - """Scenario for add a host to and remove the host from an aggregate.""" - - def run(self, availability_zone): - """Create an aggregate, add a host to and remove the host from it - - Measure "nova aggregate-add-host" and "nova aggregate-remove-host" - command performance. 
- :param availability_zone: The availability zone of the aggregate - """ - aggregate = self._create_aggregate(availability_zone) - hosts = self._list_hypervisors() - host_name = hosts[0].service["host"] - self._aggregate_add_host(aggregate, host_name) - self._aggregate_remove_host(aggregate, host_name) - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["nova"]}, - name="NovaAggregates.create_and_get_aggregate_details", - platform="openstack") -class CreateAndGetAggregateDetails(utils.NovaScenario): - """Scenario for create and get aggregate details.""" - - def run(self, availability_zone): - """Create an aggregate and then get its details. - - This scenario first creates an aggregate and then get details of it. - :param availability_zone: The availability zone of the aggregate - """ - aggregate = self._create_aggregate(availability_zone) - self._get_aggregate_details(aggregate) - - -@types.convert(image={"type": "glance_image"}) -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", - admin=True, users=True) -@scenario.configure( - context={"admin_cleanup@openstack": ["nova"], - "cleanup@openstack": ["nova"]}, - name="NovaAggregates.create_aggregate_add_host_and_boot_server", - platform="openstack") -class CreateAggregateAddHostAndBootServer(utils.NovaScenario): - """Scenario to verify an aggregate.""" - - def run(self, image, metadata, availability_zone=None, ram=512, vcpus=1, - disk=1, boot_server_kwargs=None): - """Scenario to create and verify an aggregate - - This scenario creates an aggregate, adds a compute host and metadata - to the aggregate, adds the same metadata to the flavor and creates an - instance. Verifies that instance host is one of the hosts in the - aggregate. - - :param image: The image ID to boot from - :param metadata: The metadata to be set as flavor extra specs - :param availability_zone: The availability zone of the aggregate - :param ram: Memory in MB for the flavor - :param vcpus: Number of VCPUs for the flavor - :param disk: Size of local disk in GB - :param boot_server_kwargs: Optional additional arguments to verify host - aggregates - :raises RallyException: if instance and aggregate hosts do not match - """ - - boot_server_kwargs = boot_server_kwargs or {} - - aggregate = self._create_aggregate(availability_zone) - hosts = self._list_hypervisors() - host_name = hosts[0].service["host"] - self._aggregate_set_metadata(aggregate, metadata) - self._aggregate_add_host(aggregate, host_name) - flavor = self._create_flavor(ram, vcpus, disk) - flavor.set_keys(metadata) - - server = self._boot_server(image, flavor.id, **boot_server_kwargs) - # NOTE: we need to get server object by admin user to obtain - # "hypervisor_hostname" attribute - server = self.admin_clients("nova").servers.get(server.id) - instance_hostname = getattr(server, - "OS-EXT-SRV-ATTR:hypervisor_hostname") - if instance_hostname != host_name: - raise exceptions.RallyException("Instance host and aggregate " - "host are different") diff --git a/rally/plugins/openstack/scenarios/nova/availability_zones.py b/rally/plugins/openstack/scenarios/nova/availability_zones.py deleted file mode 100644 index 92397da185..0000000000 --- a/rally/plugins/openstack/scenarios/nova/availability_zones.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright 2016 IBM Corp. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.nova import utils -from rally.task import validation - - -"""Scenarios for Nova availability-zones.""" - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(name="NovaAvailabilityZones.list_availability_zones", - platform="openstack") -class ListAvailabilityZones(utils.NovaScenario): - - def run(self, detailed=True): - """List all availability zones. - - Measure the "nova availability-zone-list" command performance. - - :param detailed: True if the availability-zone listing should contain - detailed information about all of them - """ - self._list_availability_zones(detailed) diff --git a/rally/plugins/openstack/scenarios/nova/flavors.py b/rally/plugins/openstack/scenarios/nova/flavors.py deleted file mode 100644 index bb382bcafa..0000000000 --- a/rally/plugins/openstack/scenarios/nova/flavors.py +++ /dev/null @@ -1,239 +0,0 @@ -# Copyright 2015: Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import logging -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.nova import utils -from rally.task import validation - - -"""Scenarios for Nova flavors.""" - - -LOG = logging.getLogger(__name__) - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="NovaFlavors.list_flavors", platform="openstack") -class ListFlavors(utils.NovaScenario): - - def run(self, detailed=True, is_public=True, marker=None, min_disk=None, - min_ram=None, limit=None, sort_key=None, sort_dir=None): - """List all flavors. - - Measure the "nova flavor-list" command performance. - - :param detailed: Whether flavor needs to be return with details - (optional). - :param is_public: Filter flavors with provided access type (optional). - None means give all flavors and only admin has query - access to all flavor types. - :param marker: Begin returning flavors that appear later in the flavor - list than that represented by this flavor id (optional). - :param min_disk: Filters the flavors by a minimum disk space, in GiB. - :param min_ram: Filters the flavors by a minimum RAM, in MB. - :param limit: maximum number of flavors to return (optional). 
- :param sort_key: Flavors list sort key (optional). - :param sort_dir: Flavors list sort direction (optional). - """ - self._list_flavors(detailed=detailed, is_public=is_public, - marker=marker, min_disk=min_disk, min_ram=min_ram, - limit=limit, sort_key=sort_key, sort_dir=sort_dir) - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["nova"]}, - name="NovaFlavors.create_and_list_flavor_access", - platform="openstack") -class CreateAndListFlavorAccess(utils.NovaScenario): - - def run(self, ram, vcpus, disk, flavorid="auto", - ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): - """Create a non-public flavor and list its access rules - - :param ram: Memory in MB for the flavor - :param vcpus: Number of VCPUs for the flavor - :param disk: Size of local disk in GB - :param flavorid: ID for the flavor (optional). You can use the reserved - value ``"auto"`` to have Nova generate a UUID for the - flavor in cases where you cannot simply pass ``None``. - :param ephemeral: Ephemeral space size in GB (default 0). - :param swap: Swap space in MB - :param rxtx_factor: RX/TX factor - :param is_public: Make flavor accessible to the public (default true). - """ - # NOTE(pirsriva): access rules can be listed - # only for non-public flavors - if is_public: - LOG.warning("is_public cannot be set to True for listing " - "flavor access rules. Setting is_public to False") - is_public = False - flavor = self._create_flavor(ram, vcpus, disk, flavorid=flavorid, - ephemeral=ephemeral, swap=swap, - rxtx_factor=rxtx_factor, - is_public=is_public) - self.assertTrue(flavor) - - self._list_flavor_access(flavor.id) - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["nova"]}, - name="NovaFlavors.create_flavor_and_add_tenant_access", - platform="openstack") -class CreateFlavorAndAddTenantAccess(utils.NovaScenario): - - def run(self, ram, vcpus, disk, flavorid="auto", - ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): - """Create a flavor and Add flavor access for the given tenant. - - :param ram: Memory in MB for the flavor - :param vcpus: Number of VCPUs for the flavor - :param disk: Size of local disk in GB - :param flavorid: ID for the flavor (optional). You can use the reserved - value ``"auto"`` to have Nova generate a UUID for the - flavor in cases where you cannot simply pass ``None``. - :param ephemeral: Ephemeral space size in GB (default 0). - :param swap: Swap space in MB - :param rxtx_factor: RX/TX factor - :param is_public: Make flavor accessible to the public (default true). - """ - flavor = self._create_flavor(ram, vcpus, disk, flavorid=flavorid, - ephemeral=ephemeral, swap=swap, - rxtx_factor=rxtx_factor, - is_public=is_public) - self.assertTrue(flavor) - self._add_tenant_access(flavor.id, self.context["tenant"]["id"]) - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["nova"]}, - name="NovaFlavors.create_flavor", platform="openstack") -class CreateFlavor(utils.NovaScenario): - - def run(self, ram, vcpus, disk, flavorid="auto", - ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): - """Create a flavor. 
- - :param ram: Memory in MB for the flavor - :param vcpus: Number of VCPUs for the flavor - :param disk: Size of local disk in GB - :param flavorid: ID for the flavor (optional). You can use the reserved - value ``"auto"`` to have Nova generate a UUID for the - flavor in cases where you cannot simply pass ``None``. - :param ephemeral: Ephemeral space size in GB (default 0). - :param swap: Swap space in MB - :param rxtx_factor: RX/TX factor - :param is_public: Make flavor accessible to the public (default true). - """ - self._create_flavor(ram, vcpus, disk, flavorid=flavorid, - ephemeral=ephemeral, swap=swap, - rxtx_factor=rxtx_factor, - is_public=is_public) - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["nova"]}, - name="NovaFlavors.create_and_get_flavor", - platform="openstack") -class CreateAndGetFlavor(utils.NovaScenario): - """Scenario for create and get flavor.""" - - def run(self, ram, vcpus, disk, flavorid="auto", - ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): - """Create flavor and get detailed information of the flavor. - - :param ram: Memory in MB for the flavor - :param vcpus: Number of VCPUs for the flavor - :param disk: Size of local disk in GB - :param flavorid: ID for the flavor (optional). You can use the reserved - value ``"auto"`` to have Nova generate a UUID for the - flavor in cases where you cannot simply pass ``None``. - :param ephemeral: Ephemeral space size in GB (default 0). - :param swap: Swap space in MB - :param rxtx_factor: RX/TX factor - :param is_public: Make flavor accessible to the public (default true). - """ - flavor = self._create_flavor(ram, vcpus, disk, flavorid=flavorid, - ephemeral=ephemeral, swap=swap, - rxtx_factor=rxtx_factor, - is_public=is_public) - self._get_flavor(flavor.id) - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["nova"]}, - name="NovaFlavors.create_and_delete_flavor", - platform="openstack") -class CreateAndDeleteFlavor(utils.NovaScenario): - def run(self, ram, vcpus, disk, flavorid="auto", - ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): - """Create flavor and delete the flavor. - - :param ram: Memory in MB for the flavor - :param vcpus: Number of VCPUs for the flavor - :param disk: Size of local disk in GB - :param flavorid: ID for the flavor (optional). You can use the reserved - value ``"auto"`` to have Nova generate a UUID for the - flavor in cases where you cannot simply pass ``None``. - :param ephemeral: Ephemeral space size in GB (default 0). - :param swap: Swap space in MB - :param rxtx_factor: RX/TX factor - :param is_public: Make flavor accessible to the public (default true). 
- """ - flavor = self._create_flavor(ram, vcpus, disk, flavorid=flavorid, - ephemeral=ephemeral, swap=swap, - rxtx_factor=rxtx_factor, - is_public=is_public) - self._delete_flavor(flavor.id) - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["nova"]}, - name="NovaFlavors.create_flavor_and_set_keys", - platform="openstack") -class CreateFlavorAndSetKeys(utils.NovaScenario): - def run(self, ram, vcpus, disk, extra_specs, flavorid="auto", - ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): - """Create flavor and set keys to the flavor. - - Measure the "nova flavor-key" command performance. - the scenario first create a flavor,then add the extra specs to it. - - :param ram: Memory in MB for the flavor - :param vcpus: Number of VCPUs for the flavor - :param disk: Size of local disk in GB - :param extra_specs: additional arguments for flavor set keys - :param flavorid: ID for the flavor (optional). You can use the reserved - value ``"auto"`` to have Nova generate a UUID for the - flavor in cases where you cannot simply pass ``None``. - :param ephemeral: Ephemeral space size in GB (default 0). - :param swap: Swap space in MB - :param rxtx_factor: RX/TX factor - :param is_public: Make flavor accessible to the public (default true). - """ - flavor = self._create_flavor(ram, vcpus, disk, flavorid=flavorid, - ephemeral=ephemeral, swap=swap, - rxtx_factor=rxtx_factor, - is_public=is_public) - self._set_flavor_keys(flavor, extra_specs) diff --git a/rally/plugins/openstack/scenarios/nova/hypervisors.py b/rally/plugins/openstack/scenarios/nova/hypervisors.py deleted file mode 100644 index ac50ea21fa..0000000000 --- a/rally/plugins/openstack/scenarios/nova/hypervisors.py +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright 2015 Cisco Systems Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.nova import utils -from rally.task import validation - - -"""Scenarios for Nova hypervisors.""" - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(name="NovaHypervisors.list_hypervisors", - platform="openstack") -class ListHypervisors(utils.NovaScenario): - - def run(self, detailed=True): - """List hypervisors. - - Measure the "nova hypervisor-list" command performance. 
- - :param detailed: True if the hypervisor listing should contain - detailed information about all of them - """ - self._list_hypervisors(detailed) - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(name="NovaHypervisors.list_and_get_hypervisors", - platform="openstack") -class ListAndGetHypervisors(utils.NovaScenario): - - def run(self, detailed=True): - """List and get hypervisors. - - The scenario first lists all hypervisors, then gets detailed information - of the listed hypervisors in turn. - - Measure the "nova hypervisor-show" command performance. - - :param detailed: True if the hypervisor listing should contain - detailed information about all of them - """ - hypervisors = self._list_hypervisors(detailed) - - for hypervisor in hypervisors: - self._get_hypervisor(hypervisor) - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(name="NovaHypervisors.statistics_hypervisors", - platform="openstack") -class StatisticsHypervisors(utils.NovaScenario): - - def run(self): - """Get hypervisor statistics over all compute nodes. - - Measure the "nova hypervisor-stats" command performance. - """ - self._statistics_hypervisors() - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(name="NovaHypervisors.list_and_get_uptime_hypervisors", - platform="openstack") -class ListAndGetUptimeHypervisors(utils.NovaScenario): - - def run(self, detailed=True): - """List hypervisors, then display the uptime of each one. - - The scenario first lists all hypervisors, then displays - the uptime of the listed hypervisors in turn. - - Measure the "nova hypervisor-uptime" command performance. - - :param detailed: True if the hypervisor listing should contain - detailed information about all of them - """ - hypervisors = self._list_hypervisors(detailed) - - for hypervisor in hypervisors: - self._uptime_hypervisor(hypervisor) - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(name="NovaHypervisors.list_and_search_hypervisors", - platform="openstack") -class ListAndSearchHypervisors(utils.NovaScenario): - - def run(self, detailed=True): - """List all servers belonging to a specific hypervisor. - - The scenario first lists all hypervisors, then finds their hostnames, - then lists all servers belonging to each hypervisor. - - Measure the "nova hypervisor-servers" command performance. - - :param detailed: True if the hypervisor listing should contain - detailed information about all of them - """ - hypervisors = self._list_hypervisors(detailed) - - for hypervisor in hypervisors: - self._search_hypervisors(hypervisor.hypervisor_hostname) diff --git a/rally/plugins/openstack/scenarios/nova/images.py b/rally/plugins/openstack/scenarios/nova/images.py deleted file mode 100644 index 51c244350f..0000000000 --- a/rally/plugins/openstack/scenarios/nova/images.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright 2015: Workday, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common.plugin import plugin -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.nova import utils -from rally.task import validation - - -"""Scenarios for Nova images.""" - - -@plugin.deprecated("The image proxy-interface was removed from Nova-API. Use " - "Glance related scenarios instead " - "(i.e GlanceImages.list_images.", rally_version="0.10.0") -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="NovaImages.list_images", platform="openstack") -class ListImages(utils.NovaScenario): - - def run(self, detailed=True, **kwargs): - """[DEPRECATED] List all images. - - Measure the "nova image-list" command performance. - - :param detailed: True if the image listing - should contain detailed information - :param kwargs: Optional additional arguments for image listing - """ - self._list_images(detailed, **kwargs) diff --git a/rally/plugins/openstack/scenarios/nova/keypairs.py b/rally/plugins/openstack/scenarios/nova/keypairs.py deleted file mode 100644 index b333f734c8..0000000000 --- a/rally/plugins/openstack/scenarios/nova/keypairs.py +++ /dev/null @@ -1,126 +0,0 @@ -# Copyright 2015: Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import logging -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.nova import utils -from rally.task import types -from rally.task import validation - - -"""Scenarios for Nova keypairs.""" - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["nova"]}, - name="NovaKeypair.create_and_list_keypairs", - platform="openstack") -class CreateAndListKeypairs(utils.NovaScenario): - - def run(self, **kwargs): - """Create a keypair with random name and list keypairs. - - This scenario creates a keypair and then lists all keypairs. 
- - :param kwargs: Optional additional arguments for keypair creation - """ - - keypair_name = self._create_keypair(**kwargs) - self.assertTrue(keypair_name, "Keypair isn't created") - - list_keypairs = self._list_keypairs() - self.assertIn(keypair_name, [i.id for i in list_keypairs]) - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["nova"]}, - name="NovaKeypair.create_and_delete_keypair", - platform="openstack") -class CreateAndDeleteKeypair(utils.NovaScenario): - - def run(self, **kwargs): - """Create a keypair with random name and delete keypair. - - This scenario creates a keypair and then delete that keypair. - - :param kwargs: Optional additional arguments for keypair creation - """ - - keypair = self._create_keypair(**kwargs) - self._delete_keypair(keypair) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["nova"]}, - name="NovaKeypair.boot_and_delete_server_with_keypair", - platform="openstack") -class BootAndDeleteServerWithKeypair(utils.NovaScenario): - - @logging.log_deprecated_args( - "'server_kwargs' has been renamed 'boot_server_kwargs'", - "0.3.2", ["server_kwargs"], once=True) - def run(self, image, flavor, boot_server_kwargs=None, - server_kwargs=None, **kwargs): - """Boot and delete server with keypair. - - Plan of this scenario: - - - create a keypair - - boot a VM with created keypair - - delete server - - delete keypair - - :param image: ID of the image to be used for server creation - :param flavor: ID of the flavor to be used for server creation - :param boot_server_kwargs: Optional additional arguments for VM - creation - :param server_kwargs: Deprecated alias for boot_server_kwargs - :param kwargs: Optional additional arguments for keypair creation - """ - - boot_server_kwargs = boot_server_kwargs or server_kwargs or {} - - keypair = self._create_keypair(**kwargs) - server = self._boot_server(image, flavor, - key_name=keypair, - **boot_server_kwargs) - self._delete_server(server) - self._delete_keypair(keypair) - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["nova"]}, - name="NovaKeypair.create_and_get_keypair", - platform="openstack") -class CreateAndGetKeypair(utils.NovaScenario): - - def run(self, **kwargs): - """Create a keypair and get the keypair details. - - :param kwargs: Optional additional arguments for keypair creation - """ - - keypair = self._create_keypair(**kwargs) - - self._get_keypair(keypair) diff --git a/rally/plugins/openstack/scenarios/nova/server_groups.py b/rally/plugins/openstack/scenarios/nova/server_groups.py deleted file mode 100755 index 5f792ef635..0000000000 --- a/rally/plugins/openstack/scenarios/nova/server_groups.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright 2017: Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import logging -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.nova import utils -from rally.task import validation - - -LOG = logging.getLogger(__name__) - - -"""Scenarios for Nova Group servers.""" - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["nova"]}, - name="NovaServerGroups.create_and_list_server_groups", - platform="openstack") -class CreateAndListServerGroups(utils.NovaScenario): - - def run(self, policies=None, all_projects=False, kwargs=None): - """Create a server group, then list all server groups. - - Measure the "nova server-group-create" and "nova server-group-list" - command performance. - - :param policies: Server group policy - :param all_projects: If True, display server groups from all - projects(Admin only) - :param kwargs: The server group specifications to add. - DEPRECATED, specify arguments explicitly. - """ - if kwargs is None: - kwargs = { - "policies": policies - } - else: - LOG.warning("The argument `kwargs` is deprecated since" - " Rally 0.10.0. Specify all arguments from it" - " explicitly.") - server_group = self._create_server_group(**kwargs) - msg = ("Server Groups isn't created") - self.assertTrue(server_group, err_msg=msg) - - server_groups_list = self._list_server_groups(all_projects) - msg = ("Server Group not included into list of server groups\n" - "Created server group: {}\n" - "list of server groups: {}").format(server_group, - server_groups_list) - self.assertIn(server_group, server_groups_list, err_msg=msg) - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["nova"]}, - name="NovaServerGroups.create_and_get_server_group", - platform="openstack") -class CreateAndGetServerGroup(utils.NovaScenario): - - def run(self, policies=None, kwargs=None): - """Create a server group, then get its detailed information. - - Measure the "nova server-group-create" and "nova server-group-get" - command performance. - - :param policies: Server group policy - :param kwargs: The server group specifications to add. - DEPRECATED, specify arguments explicitly. - """ - if kwargs is None: - kwargs = { - "policies": policies - } - else: - LOG.warning("The argument `kwargs` is deprecated since" - " Rally 0.10.0. 
Specify all arguments from it" - " explicitly.") - server_group = self._create_server_group(**kwargs) - msg = ("Server Groups isn't created") - self.assertTrue(server_group, err_msg=msg) - - server_group_info = self._get_server_group(server_group.id) - self.assertEqual(server_group.id, server_group_info.id) - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["nova"]}, - name="NovaServerGroups.create_and_delete_server_group", - platform="openstack") -class CreateAndDeleteServerGroup(utils.NovaScenario): - - def run(self, policies=None, kwargs=None): - """Create a server group, then delete it. - - Measure the "nova server-group-create" and "nova server-group-delete" - command performance. - - :param policies: Server group policy - :param kwargs: The server group specifications to add. - DEPRECATED, specify arguments explicitly. - """ - if kwargs is None: - kwargs = { - "policies": policies - } - else: - LOG.warning("The argument `kwargs` is deprecated since" - " Rally 0.10.0. Specify all arguments from it" - " explicitly.") - server_group = self._create_server_group(**kwargs) - msg = ("Server Group isn't created") - self.assertTrue(server_group, err_msg=msg) - - self._delete_server_group(server_group.id) diff --git a/rally/plugins/openstack/scenarios/nova/servers.py b/rally/plugins/openstack/scenarios/nova/servers.py deleted file mode 100644 index 91ed064624..0000000000 --- a/rally/plugins/openstack/scenarios/nova/servers.py +++ /dev/null @@ -1,1176 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import jsonschema - -from rally.common import logging -from rally import consts -from rally import exceptions as rally_exceptions -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.cinder import utils as cinder_utils -from rally.plugins.openstack.scenarios.neutron import utils as neutron_utils -from rally.plugins.openstack.scenarios.nova import utils -from rally.plugins.openstack.wrappers import network as network_wrapper -from rally.task import types -from rally.task import validation - - -"""Scenarios for Nova servers.""" - - -LOG = logging.getLogger(__name__) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=(consts.Service.NOVA)) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["nova"]}, - name="NovaServers.boot_and_list_server", - platform="openstack") -class BootAndListServer(utils.NovaScenario): - - def run(self, image, flavor, detailed=True, **kwargs): - """Boot a server from an image and then list all servers. - - Measure the "nova list" command performance. 
- - If you have only 1 user in your context, you will - add 1 server on every iteration. So you will have more - and more servers and will be able to measure the - performance of the "nova list" command depending on - the number of servers owned by users. - - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param detailed: True if the server listing should contain - detailed information about all of them - :param kwargs: Optional additional arguments for server creation - """ - server = self._boot_server(image, flavor, **kwargs) - msg = ("Servers isn't created") - self.assertTrue(server, err_msg=msg) - - pool_list = self._list_servers(detailed) - msg = ("Server not included into list of available servers\n" - "Booted server: {}\n" - "Pool of servers: {}").format(server, pool_list) - self.assertIn(server, pool_list, err_msg=msg) - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(name="NovaServers.list_servers", platform="openstack") -class ListServers(utils.NovaScenario): - - def run(self, detailed=True): - """List all servers. - - This simple scenario test the nova list command by listing - all the servers. - - :param detailed: True if detailed information about servers - should be listed - """ - self._list_servers(detailed) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["nova"]}, - name="NovaServers.boot_and_delete_server", - platform="openstack") -class BootAndDeleteServer(utils.NovaScenario): - - def run(self, image, flavor, min_sleep=0, max_sleep=0, - force_delete=False, **kwargs): - """Boot and delete a server. - - Optional 'min_sleep' and 'max_sleep' parameters allow the scenario - to simulate a pause between volume creation and deletion - (of random duration from [min_sleep, max_sleep]). - - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param min_sleep: Minimum sleep time in seconds (non-negative) - :param max_sleep: Maximum sleep time in seconds (non-negative) - :param force_delete: True if force_delete should be used - :param kwargs: Optional additional arguments for server creation - """ - server = self._boot_server(image, flavor, **kwargs) - self.sleep_between(min_sleep, max_sleep) - self._delete_server(server, force=force_delete) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", - admin=True, users=True) -@scenario.configure(context={"cleanup@openstack": ["nova"]}, - name="NovaServers.boot_and_delete_multiple_servers", - platform="openstack") -class BootAndDeleteMultipleServers(utils.NovaScenario): - - def run(self, image, flavor, count=2, min_sleep=0, - max_sleep=0, force_delete=False, **kwargs): - """Boot multiple servers in a single request and delete them. - - Deletion is done in parallel with one request per server, not - with a single request for all servers. 
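To illustrate "one delete request per server", a hedged sketch against a plain novaclient `client` (the helper name is made up for this example; the scenario itself also waits for each server to be gone):

    def delete_servers_one_by_one(client, servers):
        # Issue a separate DELETE call per server instead of one bulk request.
        for server in servers:
            client.servers.delete(server)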
- - :param image: The image to boot from - :param flavor: Flavor used to boot instance - :param count: Number of instances to boot - :param min_sleep: Minimum sleep time in seconds (non-negative) - :param max_sleep: Maximum sleep time in seconds (non-negative) - :param force_delete: True if force_delete should be used - :param kwargs: Optional additional arguments for instance creation - """ - servers = self._boot_servers(image, flavor, 1, instances_amount=count, - **kwargs) - self.sleep_between(min_sleep, max_sleep) - self._delete_servers(servers, force=force_delete) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image", validate_disk=False) -@validation.add("required_services", services=[consts.Service.NOVA, - consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["nova", "cinder"]}, - name="NovaServers.boot_server_from_volume_and_delete", - platform="openstack") -class BootServerFromVolumeAndDelete(utils.NovaScenario, - cinder_utils.CinderBasic): - - def run(self, image, flavor, volume_size, volume_type=None, - min_sleep=0, max_sleep=0, force_delete=False, **kwargs): - """Boot a server from volume and then delete it. - - The scenario first creates a volume and then a server. - Optional 'min_sleep' and 'max_sleep' parameters allow the scenario - to simulate a pause between volume creation and deletion - (of random duration from [min_sleep, max_sleep]). - - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param volume_size: volume size (in GB) - :param volume_type: specifies volume type when there are - multiple backends - :param min_sleep: Minimum sleep time in seconds (non-negative) - :param max_sleep: Maximum sleep time in seconds (non-negative) - :param force_delete: True if force_delete should be used - :param kwargs: Optional additional arguments for server creation - """ - volume = self.cinder.create_volume(volume_size, imageRef=image, - volume_type=volume_type) - block_device_mapping = {"vda": "%s:::1" % volume.id} - server = self._boot_server(None, flavor, - block_device_mapping=block_device_mapping, - **kwargs) - self.sleep_between(min_sleep, max_sleep) - self._delete_server(server, force=force_delete) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["nova"]}, - name="NovaServers.boot_and_bounce_server", - platform="openstack") -class BootAndBounceServer(utils.NovaScenario): - - def run(self, image, flavor, force_delete=False, actions=None, **kwargs): - """Boot a server and run specified actions against it. - - Actions should be passed into the actions parameter. Available actions - are 'hard_reboot', 'soft_reboot', 'stop_start', 'rescue_unrescue', - 'pause_unpause', 'suspend_resume', 'lock_unlock' and 'shelve_unshelve'. - Delete server after all actions were completed. 
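As a hedged illustration of the `actions` argument described below (the mapping value is assumed to be a repetition count, since the format placeholder later in this docstring is truncated):

    # Illustrative only -- each entry maps an action name to an assumed
    # number of repetitions.
    actions = [
        {"hard_reboot": 1},
        {"pause_unpause": 2},
    ]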
- - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param force_delete: True if force_delete should be used - :param actions: list of action dictionaries, where each action - dictionary speicifes an action to be performed - in the following format: - {"action_name": } - :param kwargs: Optional additional arguments for server creation - """ - action_builder = self._bind_actions() - actions = actions or [] - try: - action_builder.validate(actions) - except jsonschema.exceptions.ValidationError as error: - raise rally_exceptions.InvalidConfigException( - "Invalid server actions configuration \'%(actions)s\' due to: " - "%(error)s" % {"actions": str(actions), "error": str(error)}) - server = self._boot_server(image, flavor, **kwargs) - for action in action_builder.build_actions(actions, server): - action() - self._delete_server(server, force=force_delete) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["nova"]}, - name="NovaServers.boot_lock_unlock_and_delete", - platform="openstack") -class BootLockUnlockAndDelete(utils.NovaScenario): - - def run(self, image, flavor, min_sleep=0, - max_sleep=0, force_delete=False, **kwargs): - """Boot a server, lock it, then unlock and delete it. - - Optional 'min_sleep' and 'max_sleep' parameters allow the - scenario to simulate a pause between locking and unlocking the - server (of random duration from min_sleep to max_sleep). - - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param min_sleep: Minimum sleep time between locking and unlocking - in seconds - :param max_sleep: Maximum sleep time between locking and unlocking - in seconds - :param force_delete: True if force_delete should be used - :param kwargs: Optional additional arguments for server creation - """ - server = self._boot_server(image, flavor, **kwargs) - self._lock_server(server) - self.sleep_between(min_sleep, max_sleep) - self._unlock_server(server) - self._delete_server(server, force=force_delete) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA, - consts.Service.GLANCE]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["nova", "glance"]}, - name="NovaServers.snapshot_server", - platform="openstack") -class SnapshotServer(utils.NovaScenario): - - def run(self, image, flavor, force_delete=False, **kwargs): - """Boot a server, make its snapshot and delete both. 
- - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param force_delete: True if force_delete should be used - :param kwargs: Optional additional arguments for server creation - """ - - server = self._boot_server(image, flavor, **kwargs) - image = self._create_image(server) - self._delete_server(server, force=force_delete) - - server = self._boot_server(image.id, flavor, **kwargs) - self._delete_server(server, force=force_delete) - self._delete_image(image) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["nova"]}, - name="NovaServers.boot_server", - platform="openstack") -class BootServer(utils.NovaScenario): - - def run(self, image, flavor, auto_assign_nic=False, **kwargs): - """Boot a server. - - Assumes that cleanup is done elsewhere. - - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param auto_assign_nic: True if NICs should be assigned - :param kwargs: Optional additional arguments for server creation - """ - self._boot_server(image, flavor, - auto_assign_nic=auto_assign_nic, **kwargs) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image", validate_disk=False) -@validation.add("required_services", services=[consts.Service.NOVA, - consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["nova", "cinder"]}, - name="NovaServers.boot_server_from_volume", - platform="openstack") -class BootServerFromVolume(utils.NovaScenario, cinder_utils.CinderBasic): - - def run(self, image, flavor, volume_size, - volume_type=None, auto_assign_nic=False, **kwargs): - """Boot a server from volume. - - The scenario first creates a volume and then a server. - Assumes that cleanup is done elsewhere. - - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param volume_size: volume size (in GB) - :param volume_type: specifies volume type when there are - multiple backends - :param auto_assign_nic: True if NICs should be assigned - :param kwargs: Optional additional arguments for server creation - """ - volume = self.cinder.create_volume(volume_size, imageRef=image, - volume_type=volume_type) - block_device_mapping = {"vda": "%s:::1" % volume.id} - self._boot_server(None, flavor, auto_assign_nic=auto_assign_nic, - block_device_mapping=block_device_mapping, - **kwargs) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}, - to_flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=(consts.Service.NOVA)) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["nova"]}, - name="NovaServers.resize_server", platform="openstack") -class ResizeServer(utils.NovaScenario): - - def run(self, image, flavor, to_flavor, force_delete=False, **kwargs): - """Boot a server, then resize and delete it. 
- - This test will confirm the resize by default, - or revert the resize if confirm is set to false. - - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param to_flavor: flavor to be used to resize the booted instance - :param force_delete: True if force_delete should be used - :param kwargs: Optional additional arguments for server creation - """ - server = self._boot_server(image, flavor, **kwargs) - self._resize(server, to_flavor) - # by default we confirm - confirm = kwargs.get("confirm", True) - if confirm: - self._resize_confirm(server) - else: - self._resize_revert(server) - self._delete_server(server, force=force_delete) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}, - to_flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["nova"]}, - name="NovaServers.resize_shutoff_server", - platform="openstack") -class ResizeShutoffServer(utils.NovaScenario): - - def run(self, image, flavor, to_flavor, confirm=True, - force_delete=False, **kwargs): - """Boot a server and stop it, then resize and delete it. - - This test will confirm the resize by default, - or revert the resize if confirm is set to false. - - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param to_flavor: flavor to be used to resize the booted instance - :param confirm: True if need to confirm resize else revert resize - :param force_delete: True if force_delete should be used - :param kwargs: Optional additional arguments for server creation - """ - server = self._boot_server(image, flavor, **kwargs) - self._stop_server(server) - self._resize(server, to_flavor) - - if confirm: - self._resize_confirm(server, "SHUTOFF") - else: - self._resize_revert(server, "SHUTOFF") - self._delete_server(server, force=force_delete) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}, - to_flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA, - consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure( - context={"cleanup@openstack": ["cinder", "nova"]}, - name="NovaServers.boot_server_attach_created_volume_and_resize", - platform="openstack") -class BootServerAttachCreatedVolumeAndResize(utils.NovaScenario, - cinder_utils.CinderBasic): - - def run(self, image, flavor, to_flavor, volume_size, min_sleep=0, - max_sleep=0, force_delete=False, confirm=True, do_delete=True, - boot_server_kwargs=None, create_volume_kwargs=None): - """Create a VM from image, attach a volume to it and resize. - - Simple test to create a VM and attach a volume, then resize the VM, - detach the volume then delete volume and VM. - Optional 'min_sleep' and 'max_sleep' parameters allow the scenario - to simulate a pause between attaching a volume and running resize - (of random duration from range [min_sleep, max_sleep]). 
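The pause mentioned above is simply a sleep of random length; a minimal sketch of that behaviour (Rally's own sleep_between helper may differ in detail):

    import random
    import time

    def sleep_between(min_sleep, max_sleep):
        # Pause for a random duration drawn uniformly from [min_sleep, max_sleep].
        time.sleep(random.uniform(min_sleep, max_sleep))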
- :param image: Glance image name to use for the VM - :param flavor: VM flavor name - :param to_flavor: flavor to be used to resize the booted instance - :param volume_size: volume size (in GB) - :param min_sleep: Minimum sleep time in seconds (non-negative) - :param max_sleep: Maximum sleep time in seconds (non-negative) - :param force_delete: True if force_delete should be used - :param confirm: True if need to confirm resize else revert resize - :param do_delete: True if resources needs to be deleted explicitly - else use rally cleanup to remove resources - :param boot_server_kwargs: optional arguments for VM creation - :param create_volume_kwargs: optional arguments for volume creation - """ - boot_server_kwargs = boot_server_kwargs or {} - create_volume_kwargs = create_volume_kwargs or {} - - server = self._boot_server(image, flavor, **boot_server_kwargs) - volume = self.cinder.create_volume(volume_size, **create_volume_kwargs) - - self._attach_volume(server, volume) - self.sleep_between(min_sleep, max_sleep) - self._resize(server, to_flavor) - - if confirm: - self._resize_confirm(server) - else: - self._resize_revert(server) - - if do_delete: - self._detach_volume(server, volume) - self.cinder.delete_volume(volume) - self._delete_server(server, force=force_delete) - - -@validation.add("number", param_name="volume_num", minval=1, - integer_only=True) -@validation.add("number", param_name="volume_size", minval=1, - integer_only=True) -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image", validate_disk=False) -@validation.add("required_services", services=[consts.Service.NOVA, - consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure( - context={"cleanup@openstack": ["cinder", "nova"]}, - name="NovaServers.boot_server_attach_volume_and_list_attachments", - platform="openstack") -class BootServerAttachVolumeAndListAttachments(utils.NovaScenario, - cinder_utils.CinderBasic): - - def run(self, image, flavor, volume_size=1, volume_num=2, - boot_server_kwargs=None, create_volume_kwargs=None): - """Create a VM, attach N volume to it and list server's attachemnt. - - Measure the "nova volume-attachments" command performance. 
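A hedged sketch of what the attachment listing amounts to with plain novaclient (the helper name is illustrative):

    def list_attachments(client, server_id):
        # Return the volume attachments currently bound to the server,
        # i.e. what "nova volume-attachments <server>" prints.
        return client.volumes.get_server_volumes(server_id)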
- - :param image: Glance image name to use for the VM - :param flavor: VM flavor name - :param volume_size: volume size (in GB), default 1G - :param volume_num: the num of attached volume - :param boot_server_kwargs: optional arguments for VM creation - :param create_volume_kwargs: optional arguments for volume creation - """ - boot_server_kwargs = boot_server_kwargs or {} - create_volume_kwargs = create_volume_kwargs or {} - - server = self._boot_server(image, flavor, **boot_server_kwargs) - attachments = [] - for i in range(volume_num): - volume = self.cinder.create_volume(volume_size, - **create_volume_kwargs) - attachments.append(self._attach_volume(server, volume)) - - list_attachments = self._list_attachments(server.id) - - for attachment in attachments: - msg = ("attachment not included into list of available" - "attachments\n attachment: {}\n" - "list attachments: {}").format(attachment, list_attachments) - self.assertIn(attachment, list_attachments, err_msg=msg) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}, - to_flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image", validate_disk=False) -@validation.add("required_services", services=[consts.Service.NOVA, - consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["nova", "cinder"]}, - name="NovaServers.boot_server_from_volume_and_resize", - platform="openstack") -class BootServerFromVolumeAndResize(utils.NovaScenario, - cinder_utils.CinderBasic): - - def run(self, image, flavor, to_flavor, volume_size, min_sleep=0, - max_sleep=0, force_delete=False, confirm=True, do_delete=True, - boot_server_kwargs=None, create_volume_kwargs=None): - """Boot a server from volume, then resize and delete it. - - The scenario first creates a volume and then a server. - Optional 'min_sleep' and 'max_sleep' parameters allow the scenario - to simulate a pause between volume creation and deletion - (of random duration from [min_sleep, max_sleep]). - - This test will confirm the resize by default, - or revert the resize if confirm is set to false. 
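A hedged sketch of the confirm-or-revert branch using plain novaclient calls (the scenario itself goes through Rally's atomic-action wrappers and status polling, which are omitted here):

    def resize_and_finalize(client, server, to_flavor, confirm=True):
        # Request the resize, then either confirm it or roll it back.
        # Waiting for the server to reach VERIFY_RESIZE is omitted.
        client.servers.resize(server, to_flavor)
        if confirm:
            client.servers.confirm_resize(server)
        else:
            client.servers.revert_resize(server)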
- - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param to_flavor: flavor to be used to resize the booted instance - :param volume_size: volume size (in GB) - :param min_sleep: Minimum sleep time in seconds (non-negative) - :param max_sleep: Maximum sleep time in seconds (non-negative) - :param force_delete: True if force_delete should be used - :param confirm: True if need to confirm resize else revert resize - :param do_delete: True if resources needs to be deleted explicitly - else use rally cleanup to remove resources - :param boot_server_kwargs: optional arguments for VM creation - :param create_volume_kwargs: optional arguments for volume creation - """ - boot_server_kwargs = boot_server_kwargs or {} - create_volume_kwargs = create_volume_kwargs or {} - - if boot_server_kwargs.get("block_device_mapping"): - LOG.warning("Using already existing volume is not permitted.") - - volume = self.cinder.create_volume(volume_size, imageRef=image, - **create_volume_kwargs) - boot_server_kwargs["block_device_mapping"] = { - "vda": "%s:::1" % volume.id} - - server = self._boot_server(None, flavor, **boot_server_kwargs) - self.sleep_between(min_sleep, max_sleep) - self._resize(server, to_flavor) - - if confirm: - self._resize_confirm(server) - else: - self._resize_revert(server) - - if do_delete: - self._delete_server(server, force=force_delete) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["nova"]}, - name="NovaServers.suspend_and_resume_server", - platform="openstack") -class SuspendAndResumeServer(utils.NovaScenario): - - def run(self, image, flavor, force_delete=False, **kwargs): - """Create a server, suspend, resume and then delete it - - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param force_delete: True if force_delete should be used - :param kwargs: Optional additional arguments for server creation - """ - server = self._boot_server(image, flavor, **kwargs) - self._suspend_server(server) - self._resume_server(server) - self._delete_server(server, force=force_delete) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["nova"]}, - name="NovaServers.pause_and_unpause_server", - platform="openstack") -class PauseAndUnpauseServer(utils.NovaScenario): - - def run(self, image, flavor, force_delete=False, **kwargs): - """Create a server, pause, unpause and then delete it - - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param force_delete: True if force_delete should be used - :param kwargs: Optional additional arguments for server creation - """ - server = self._boot_server(image, flavor, **kwargs) - self._pause_server(server) - self._unpause_server(server) - self._delete_server(server, force=force_delete) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": 
"nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["nova"]}, - name="NovaServers.shelve_and_unshelve_server", - platform="openstack") -class ShelveAndUnshelveServer(utils.NovaScenario): - - def run(self, image, flavor, force_delete=False, **kwargs): - """Create a server, shelve, unshelve and then delete it - - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param force_delete: True if force_delete should be used - :param kwargs: Optional additional arguments for server creation - """ - server = self._boot_server(image, flavor, **kwargs) - self._shelve_server(server) - self._unshelve_server(server) - self._delete_server(server, force=force_delete) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", - admin=True, users=True) -@scenario.configure(context={"cleanup@openstack": ["nova"]}, - name="NovaServers.boot_and_live_migrate_server", - platform="openstack") -class BootAndLiveMigrateServer(utils.NovaScenario): - - def run(self, image, flavor, block_migration=False, disk_over_commit=False, - min_sleep=0, max_sleep=0, **kwargs): - """Live Migrate a server. - - This scenario launches a VM on a compute node available in - the availability zone and then migrates the VM to another - compute node on the same availability zone. - - Optional 'min_sleep' and 'max_sleep' parameters allow the scenario - to simulate a pause between VM booting and running live migration - (of random duration from range [min_sleep, max_sleep]). - - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param block_migration: Specifies the migration type - :param disk_over_commit: Specifies whether to allow overcommit - on migrated instance or not - :param min_sleep: Minimum sleep time in seconds (non-negative) - :param max_sleep: Maximum sleep time in seconds (non-negative) - :param kwargs: Optional additional arguments for server creation - """ - server = self._boot_server(image, flavor, **kwargs) - self.sleep_between(min_sleep, max_sleep) - - self._live_migrate(server, block_migration, disk_over_commit) - - self._delete_server(server) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image", validate_disk=False) -@validation.add("required_services", services=[consts.Service.NOVA, - consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", - admin=True, users=True) -@scenario.configure( - context={"cleanup@openstack": ["nova", "cinder"]}, - name="NovaServers.boot_server_from_volume_and_live_migrate", - platform="openstack") -class BootServerFromVolumeAndLiveMigrate(utils.NovaScenario, - cinder_utils.CinderBasic): - - def run(self, image, flavor, volume_size, volume_type=None, - block_migration=False, disk_over_commit=False, force_delete=False, - min_sleep=0, max_sleep=0, **kwargs): - """Boot a server from volume and then migrate it. 
- - The scenario first creates a volume and a server booted from - the volume on a compute node available in the availability zone and - then migrates the VM to another compute node on the same availability - zone. - - Optional 'min_sleep' and 'max_sleep' parameters allow the scenario - to simulate a pause between VM booting and running live migration - (of random duration from range [min_sleep, max_sleep]). - - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param volume_size: volume size (in GB) - :param volume_type: specifies volume type when there are - multiple backends - :param block_migration: Specifies the migration type - :param disk_over_commit: Specifies whether to allow overcommit - on migrated instance or not - :param force_delete: True if force_delete should be used - :param min_sleep: Minimum sleep time in seconds (non-negative) - :param max_sleep: Maximum sleep time in seconds (non-negative) - :param kwargs: Optional additional arguments for server creation - """ - volume = self.cinder.create_volume(volume_size, imageRef=image, - volume_type=volume_type) - block_device_mapping = {"vda": "%s:::1" % volume.id} - server = self._boot_server(None, flavor, - block_device_mapping=block_device_mapping, - **kwargs) - self.sleep_between(min_sleep, max_sleep) - - self._live_migrate(server, block_migration, disk_over_commit) - - self._delete_server(server, force=force_delete) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA, - consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", - admin=True, users=True) -@scenario.configure( - context={"cleanup@openstack": ["cinder", "nova"]}, - name="NovaServers.boot_server_attach_created_volume_and_live_migrate", - platform="openstack") -class BootServerAttachCreatedVolumeAndLiveMigrate(utils.NovaScenario, - cinder_utils.CinderBasic): - - def run(self, image, flavor, size, block_migration=False, - disk_over_commit=False, boot_server_kwargs=None, - create_volume_kwargs=None, min_sleep=0, max_sleep=0): - """Create a VM, attach a volume to it and live migrate. - - Simple test to create a VM and attach a volume, then migrate the VM, - detach the volume and delete volume/VM. - - Optional 'min_sleep' and 'max_sleep' parameters allow the scenario - to simulate a pause between attaching a volume and running live - migration (of random duration from range [min_sleep, max_sleep]). 
- - :param image: Glance image name to use for the VM - :param flavor: VM flavor name - :param size: volume size (in GB) - :param block_migration: Specifies the migration type - :param disk_over_commit: Specifies whether to allow overcommit - on migrated instance or not - :param boot_server_kwargs: optional arguments for VM creation - :param create_volume_kwargs: optional arguments for volume creation - :param min_sleep: Minimum sleep time in seconds (non-negative) - :param max_sleep: Maximum sleep time in seconds (non-negative) - """ - - if boot_server_kwargs is None: - boot_server_kwargs = {} - if create_volume_kwargs is None: - create_volume_kwargs = {} - - server = self._boot_server(image, flavor, **boot_server_kwargs) - volume = self.cinder.create_volume(size, **create_volume_kwargs) - - self._attach_volume(server, volume) - - self.sleep_between(min_sleep, max_sleep) - - self._live_migrate(server, block_migration, disk_over_commit) - - self._detach_volume(server, volume) - - self.cinder.delete_volume(volume) - self._delete_server(server) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", - admin=True, users=True) -@scenario.configure(context={"cleanup@openstack": ["nova"]}, - name="NovaServers.boot_and_migrate_server", - platform="openstack") -class BootAndMigrateServer(utils.NovaScenario): - - def run(self, image, flavor, **kwargs): - """Migrate a server. - - This scenario launches a VM on a compute node available in - the availability zone, and then migrates the VM - to another compute node on the same availability zone. - - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param kwargs: Optional additional arguments for server creation - """ - server = self._boot_server(image, flavor, **kwargs) - self._migrate(server) - # NOTE(wtakase): This is required because cold migration and resize - # share same code path. - confirm = kwargs.get("confirm", True) - if confirm: - self._resize_confirm(server, status="ACTIVE") - else: - self._resize_revert(server, status="ACTIVE") - self._delete_server(server) - - -@types.convert(from_image={"type": "glance_image"}, - to_image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="from_image") -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="to_image") -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", - admin=True, users=True) -@scenario.configure(context={"cleanup@openstack": ["nova"]}, - name="NovaServers.boot_and_rebuild_server", - platform="openstack") -class BootAndRebuildServer(utils.NovaScenario): - - def run(self, from_image, to_image, flavor, **kwargs): - """Rebuild a server. - - This scenario launches a VM, then rebuilds that VM with a - different image. 
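A hedged one-liner for the underlying rebuild call, assuming an authenticated novaclient `client`:

    def rebuild_with_new_image(client, server, to_image):
        # Ask nova to rebuild the existing server against a different image.
        return client.servers.rebuild(server, to_image)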
- - :param from_image: image to be used to boot an instance - :param to_image: image to be used to rebuild the instance - :param flavor: flavor to be used to boot an instance - :param kwargs: Optional additional arguments for server creation - """ - server = self._boot_server(from_image, flavor, **kwargs) - self._rebuild_server(server, to_image) - self._delete_server(server) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@validation.add("required_contexts", contexts=("network")) -@scenario.configure( - context={"cleanup@openstack": ["nova", "neutron.floatingip"]}, - name="NovaServers.boot_and_associate_floating_ip", - platform="openstack") -class BootAndAssociateFloatingIp(utils.NovaScenario): - - def run(self, image, flavor, create_floating_ip_args=None, **kwargs): - """Boot a server and associate a floating IP to it. - - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param create_floating_ip_args: Optional additional arguments for - floating ip creation - :param kwargs: Optional additional arguments for server creation - """ - create_floating_ip_args = create_floating_ip_args or {} - server = self._boot_server(image, flavor, **kwargs) - address = network_wrapper.wrap(self.clients, self).create_floating_ip( - tenant_id=server.tenant_id, **create_floating_ip_args) - self._associate_floating_ip(server, address["ip"]) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA, - consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["nova", "neutron"]}, - name="NovaServers.boot_server_and_attach_interface", - platform="openstack") -class BootServerAndAttachInterface(utils.NovaScenario, - neutron_utils.NeutronScenario): - def run(self, image, flavor, network_create_args=None, - subnet_create_args=None, subnet_cidr_start=None, - boot_server_args=None): - """Create server and subnet, then attach the interface to it. - - This scenario measures the "nova interface-attach" command performance. - - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param network_create_args: dict, POST /v2.0/networks request - options. 
- :param subnet_create_args: dict, POST /v2.0/subnets request options - :param subnet_cidr_start: str, start value for subnets CIDR - :param boot_server_args: Optional additional arguments for - server creation - """ - network = self._get_or_create_network(network_create_args) - self._create_subnet(network, subnet_create_args, subnet_cidr_start) - - server = self._boot_server(image, flavor, **boot_server_args) - self._attach_interface(server, net_id=network["network"]["id"]) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["nova"]}, - name="NovaServers.boot_and_show_server", - platform="openstack") -class BootAndShowServer(utils.NovaScenario): - - def run(self, image, flavor, **kwargs): - """Show server details. - - This simple scenario tests the nova show command by retrieving - the server details. - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param kwargs: Optional additional arguments for server creation - - :returns: Server details - """ - server = self._boot_server(image, flavor, **kwargs) - self._show_server(server) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["nova"]}, - name="NovaServers.boot_and_get_console_output", - platform="openstack") -class BootAndGetConsoleOutput(utils.NovaScenario): - - def run(self, image, flavor, length=None, **kwargs): - """Get text console output from server. - - This simple scenario tests the nova console-log command by retrieving - the text console log output. - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param length: The number of tail log lines you would like to retrieve. - None (default value) or -1 means unlimited length. - :param kwargs: Optional additional arguments for server creation - - :returns: Text console log output for server - """ - server = self._boot_server(image, flavor, **kwargs) - self._get_server_console_output(server, length) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["nova"]}, - name="NovaServers.boot_and_update_server", - platform="openstack") -class BootAndUpdateServer(utils.NovaScenario): - - def run(self, image, flavor, description=None, **kwargs): - """Boot a server, then update its name and description. - - The scenario first creates a server, then update it. - Assumes that cleanup is done elsewhere. 
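A hedged sketch of the update step (updating the description requires a recent compute API microversion; treat this as illustrative rather than the plugin's exact call):

    def rename_and_describe(client, server, new_name, description=None):
        # Update the server's display name and, optionally, its description.
        return client.servers.update(server, name=new_name, description=description)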
- - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param description: update the server description - :param kwargs: Optional additional arguments for server creation - """ - server = self._boot_server(image, flavor, **kwargs) - self._update_server(server, description) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA, - consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["nova", "cinder"]}, - name="NovaServers.boot_server_from_volume_snapshot", - platform="openstack") -class BootServerFromVolumeSnapshot(utils.NovaScenario, - cinder_utils.CinderBasic): - - def run(self, image, flavor, volume_size, volume_type=None, - auto_assign_nic=False, **kwargs): - """Boot a server from a snapshot. - - The scenario first creates a volume and creates a - snapshot from this volume, then boots a server from - the created snapshot. - Assumes that cleanup is done elsewhere. - - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param volume_size: volume size (in GB) - :param volume_type: specifies volume type when there are - multiple backends - :param auto_assign_nic: True if NICs should be assigned - :param kwargs: Optional additional arguments for server creation - """ - volume = self.cinder.create_volume(volume_size, imageRef=image, - volume_type=volume_type) - snapshot = self.cinder.create_snapshot(volume.id, force=False) - block_device_mapping = {"vda": "%s:snap::1" % snapshot.id} - self._boot_server(None, flavor, auto_assign_nic=auto_assign_nic, - block_device_mapping=block_device_mapping, - **kwargs) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@validation.add("required_contexts", contexts=("network")) -@scenario.configure( - context={"cleanup@openstack": ["nova", "neutron.floatingip"]}, - name="NovaServers.boot_server_associate_and_dissociate_floating_ip", - platform="openstack") -class BootServerAssociateAndDissociateFloatingIP(utils.NovaScenario): - - def run(self, image, flavor, create_floating_ip_args=None, **kwargs): - """Boot a server associate and dissociate a floating IP from it. - - The scenario first boot a server and create a floating IP. then - associate the floating IP to the server.Finally dissociate the floating - IP. 
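As a rough illustration only: older python-novaclient releases exposed associate/dissociate helpers directly on the server object (newer deployments do the same through the Neutron API, which is what the network wrapper used below abstracts):

    def associate_and_dissociate(server, floating_ip):
        # Attach the floating IP to the server, then detach it again.
        server.add_floating_ip(floating_ip)
        server.remove_floating_ip(floating_ip)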
- - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param create_floating_ip_args: Optional additional arguments for - floating ip creation - :param kwargs: Optional additional arguments for server creation - """ - - create_floating_ip_args = create_floating_ip_args or {} - server = self._boot_server(image, flavor, **kwargs) - address = network_wrapper.wrap(self.clients, self).create_floating_ip( - tenant_id=server.tenant_id, **create_floating_ip_args) - self._associate_floating_ip(server, address["ip"]) - self._dissociate_floating_ip(server, address["ip"]) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@validation.add("required_contexts", contexts=("network")) -@scenario.configure(context={"cleanup@openstack": ["nova"]}, - name="NovaServers.boot_server_and_list_interfaces", - platform="openstack") -class BootServerAndListInterfaces(utils.NovaScenario): - - def run(self, image, flavor, **kwargs): - """Boot a server and list interfaces attached to it. - - Measure the "nova boot" and "nova interface-list" command performance. - - :param image: ID of the image to be used for server creation - :param flavor: ID of the flavor to be used for server creation - :param **kwargs: Optional arguments for booting the instance - """ - server = self._boot_server(image, flavor, **kwargs) - self._list_interfaces(server) - - -@validation.add( - "enum", param_name="console_type", - values=["novnc", "xvpvnc", "spice-html5", "rdp-html5", "serial", "webmks"]) -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["nova"]}, - name="NovaServers.boot_and_get_console_url", - platform="openstack") -class BootAndGetConsoleUrl(utils.NovaScenario): - - def run(self, image, flavor, console_type, **kwargs): - """Retrieve a console url of a server. - - This simple scenario tests retrieving the console url of a server. - - :param image: image to be used to boot an instance - :param flavor: flavor to be used to boot an instance - :param console_type: type can be novnc/xvpvnc for protocol vnc; - spice-html5 for protocol spice; rdp-html5 for - protocol rdp; serial for protocol serial. - webmks for protocol mks (since version 2.8). - :param kwargs: Optional additional arguments for server creation - """ - server = self._boot_server(image, flavor, **kwargs) - self._get_console_url_server(server, console_type) diff --git a/rally/plugins/openstack/scenarios/nova/services.py b/rally/plugins/openstack/scenarios/nova/services.py deleted file mode 100644 index 859afdee20..0000000000 --- a/rally/plugins/openstack/scenarios/nova/services.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2016 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.nova import utils -from rally.task import validation - - -"""Scenarios for Nova agents.""" - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(name="NovaServices.list_services", platform="openstack") -class ListServices(utils.NovaScenario): - - def run(self, host=None, binary=None): - """List all nova services. - - Measure the "nova service-list" command performance. - - :param host: List nova services on host - :param binary: List nova services matching given binary - """ - self._list_services(host, binary) diff --git a/rally/plugins/openstack/scenarios/nova/utils.py b/rally/plugins/openstack/scenarios/nova/utils.py deleted file mode 100644 index c7a0bb0a20..0000000000 --- a/rally/plugins/openstack/scenarios/nova/utils.py +++ /dev/null @@ -1,1255 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from rally.common import cfg -from rally.common import logging -from rally import exceptions -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.cinder import utils as cinder_utils -from rally.plugins.openstack.services.image import image as image_service -from rally.task import atomic -from rally.task import utils - -CONF = cfg.CONF -LOG = logging.getLogger(__file__) - - -class NovaScenario(scenario.OpenStackScenario): - """Base class for Nova scenarios with basic atomic actions.""" - - @atomic.action_timer("nova.list_servers") - def _list_servers(self, detailed=True): - """Returns user servers list.""" - return self.clients("nova").servers.list(detailed) - - def _pick_random_nic(self): - """Choose one network from existing ones.""" - ctxt = self.context - nets = [net["id"] - for net in ctxt.get("tenant", {}).get("networks", [])] - if nets: - # NOTE(amaretskiy): Balance servers among networks. - net_idx = self.context["iteration"] % len(nets) - return [{"net-id": nets[net_idx]}] - - @atomic.action_timer("nova.boot_server") - def _boot_server(self, image, flavor, - auto_assign_nic=False, **kwargs): - """Boot a server. - - Returns when the server is actually booted and in "ACTIVE" state. - - If multiple networks created by Network context are present, the first - network found that isn't associated with a floating IP pool is used. 
- - :param image: image ID or instance for server creation - :param flavor: int, flavor ID or instance for server creation - :param auto_assign_nic: bool, whether or not to auto assign NICs - :param kwargs: other optional parameters to initialize the server - :returns: nova Server instance - """ - server_name = self.generate_random_name() - secgroup = self.context.get("user", {}).get("secgroup") - if secgroup: - if "security_groups" not in kwargs: - kwargs["security_groups"] = [secgroup["name"]] - elif secgroup["name"] not in kwargs["security_groups"]: - kwargs["security_groups"].append(secgroup["name"]) - - if auto_assign_nic and not kwargs.get("nics", False): - nic = self._pick_random_nic() - if nic: - kwargs["nics"] = nic - - server = self.clients("nova").servers.create( - server_name, image, flavor, **kwargs) - - self.sleep_between(CONF.openstack.nova_server_boot_prepoll_delay) - server = utils.wait_for_status( - server, - ready_statuses=["ACTIVE"], - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.nova_server_boot_timeout, - check_interval=CONF.openstack.nova_server_boot_poll_interval - ) - return server - - def _do_server_reboot(self, server, reboottype): - server.reboot(reboot_type=reboottype) - self.sleep_between(CONF.openstack.nova_server_pause_prepoll_delay) - utils.wait_for_status( - server, - ready_statuses=["ACTIVE"], - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.nova_server_reboot_timeout, - check_interval=CONF.openstack.nova_server_reboot_poll_interval - ) - - @atomic.action_timer("nova.soft_reboot_server") - def _soft_reboot_server(self, server): - """Reboot a server with soft reboot. - - A soft reboot will be issued on the given server upon which time - this method will wait for the server to become active. - - :param server: The server to reboot. - """ - self._do_server_reboot(server, "SOFT") - - @atomic.action_timer("nova.show_server") - def _show_server(self, server): - """Show server details. - - :param server: The server to get details for. - - :returns: Server details - """ - return self.clients("nova").servers.get(server) - - @atomic.action_timer("nova.get_console_output_server") - def _get_server_console_output(self, server, length=None): - """Get text of a console log output from a server. - - :param server: The server whose console output to retrieve - :param length: The number of tail log lines you would like to retrieve. - - :returns: Text console output from server - """ - return self.clients("nova").servers.get_console_output(server, - length=length) - - @atomic.action_timer("nova.get_console_url_server") - def _get_console_url_server(self, server, console_type): - """Retrieve a console url of a server. - - :param server: server to get console url for - :param console_type: type can be novnc/xvpvnc for protocol vnc; - spice-html5 for protocol spice; rdp-html5 for - protocol rdp; serial for protocol serial. - webmks for protocol mks (since version 2.8). - - :returns: An instance of novaclient.base.DictWithMeta - """ - return self.clients("nova").servers.get_console_url(server, - console_type) - - @atomic.action_timer("nova.reboot_server") - def _reboot_server(self, server): - """Reboot a server with hard reboot. - - A reboot will be issued on the given server upon which time - this method will wait for the server to become active. - - :param server: The server to reboot. 
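(Illustrative aside: nearly every helper above sleeps for a "prepoll" delay and then delegates to rally.task.utils.wait_for_status with a ready-status list, a timeout, and a poll interval. The real implementation lives in rally.task.utils; the following is only a simplified sketch of the polling contract those calls rely on, with assumed parameter defaults.)

    import time

    # Simplified illustration of the wait-for-status pattern used by
    # _boot_server, _stop_server, etc. Not the actual Rally implementation.
    def wait_for_status_sketch(resource, ready_statuses, update_resource,
                               timeout=300.0, check_interval=1.0):
        deadline = time.time() + timeout
        while time.time() < deadline:
            resource = update_resource(resource)   # re-fetch the current state
            if getattr(resource, "status", None) in ready_statuses:
                return resource
            time.sleep(check_interval)             # poll, don't busy-wait
        raise TimeoutError("resource did not reach %s within %ss"
                           % (ready_statuses, timeout))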
- """ - self._do_server_reboot(server, "HARD") - - @atomic.action_timer("nova.rebuild_server") - def _rebuild_server(self, server, image, **kwargs): - """Rebuild a server with a new image. - - :param server: The server to rebuild. - :param image: The new image to rebuild the server with. - :param kwargs: Optional additional arguments to pass to the rebuild - """ - server.rebuild(image, **kwargs) - self.sleep_between(CONF.openstack.nova_server_rebuild_prepoll_delay) - utils.wait_for_status( - server, - ready_statuses=["ACTIVE"], - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.nova_server_rebuild_timeout, - check_interval=CONF.openstack.nova_server_rebuild_poll_interval - ) - - @atomic.action_timer("nova.start_server") - def _start_server(self, server): - """Start the given server. - - A start will be issued for the given server upon which time - this method will wait for it to become ACTIVE. - - :param server: The server to start and wait to become ACTIVE. - """ - server.start() - utils.wait_for_status( - server, - ready_statuses=["ACTIVE"], - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.nova_server_start_timeout, - check_interval=CONF.openstack.nova_server_start_poll_interval - ) - - @atomic.action_timer("nova.stop_server") - def _stop_server(self, server): - """Stop the given server. - - Issues a stop on the given server and waits for the server - to become SHUTOFF. - - :param server: The server to stop. - """ - server.stop() - utils.wait_for_status( - server, - ready_statuses=["SHUTOFF"], - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.nova_server_stop_timeout, - check_interval=CONF.openstack.nova_server_stop_poll_interval - ) - - @atomic.action_timer("nova.rescue_server") - def _rescue_server(self, server): - """Rescue the given server. - - Returns when the server is actually rescue and is in the "Rescue" - state. - - :param server: Server object - """ - server.rescue() - self.sleep_between(CONF.openstack.nova_server_rescue_prepoll_delay) - utils.wait_for_status( - server, - ready_statuses=["RESCUE"], - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.nova_server_rescue_timeout, - check_interval=CONF.openstack.nova_server_rescue_poll_interval - ) - - @atomic.action_timer("nova.unrescue_server") - def _unrescue_server(self, server): - """Unrescue the given server. - - Returns when the server is unrescue and waits to become ACTIVE - - :param server: Server object - """ - server.unrescue() - self.sleep_between(CONF.openstack.nova_server_unrescue_prepoll_delay) - utils.wait_for_status( - server, - ready_statuses=["ACTIVE"], - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.nova_server_unrescue_timeout, - check_interval=CONF.openstack.nova_server_unrescue_poll_interval - ) - - @atomic.action_timer("nova.suspend_server") - def _suspend_server(self, server): - """Suspends the given server. - - Returns when the server is actually suspended and is in the "Suspended" - state. - - :param server: Server object - """ - server.suspend() - self.sleep_between(CONF.openstack.nova_server_suspend_prepoll_delay) - utils.wait_for_status( - server, - ready_statuses=["SUSPENDED"], - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.nova_server_suspend_timeout, - check_interval=CONF.openstack.nova_server_suspend_poll_interval - ) - - @atomic.action_timer("nova.resume_server") - def _resume_server(self, server): - """Resumes the suspended server. 
- - Returns when the server is actually resumed and is in the "ACTIVE" - state. - - :param server: Server object - """ - server.resume() - self.sleep_between(CONF.openstack.nova_server_resume_prepoll_delay) - utils.wait_for_status( - server, - ready_statuses=["ACTIVE"], - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.nova_server_resume_timeout, - check_interval=CONF.openstack.nova_server_resume_poll_interval - ) - - @atomic.action_timer("nova.pause_server") - def _pause_server(self, server): - """Pause the live server. - - Returns when the server is actually paused and is in the "PAUSED" - state. - - :param server: Server object - """ - server.pause() - self.sleep_between(CONF.openstack.nova_server_pause_prepoll_delay) - utils.wait_for_status( - server, - ready_statuses=["PAUSED"], - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.nova_server_pause_timeout, - check_interval=CONF.openstack.nova_server_pause_poll_interval - ) - - @atomic.action_timer("nova.unpause_server") - def _unpause_server(self, server): - """Unpause the paused server. - - Returns when the server is actually unpaused and is in the "ACTIVE" - state. - - :param server: Server object - """ - server.unpause() - self.sleep_between(CONF.openstack.nova_server_pause_prepoll_delay) - utils.wait_for_status( - server, - ready_statuses=["ACTIVE"], - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.nova_server_unpause_timeout, - check_interval=CONF.openstack.nova_server_unpause_poll_interval - ) - - @atomic.action_timer("nova.shelve_server") - def _shelve_server(self, server): - """Shelve the given server. - - Returns when the server is actually shelved and is in the - "SHELVED_OFFLOADED" state. - - :param server: Server object - """ - server.shelve() - self.sleep_between(CONF.openstack.nova_server_pause_prepoll_delay) - utils.wait_for_status( - server, - ready_statuses=["SHELVED_OFFLOADED"], - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.nova_server_shelve_timeout, - check_interval=CONF.openstack.nova_server_shelve_poll_interval - ) - - @atomic.action_timer("nova.unshelve_server") - def _unshelve_server(self, server): - """Unshelve the given server. - - Returns when the server is unshelved and is in the "ACTIVE" state. - - :param server: Server object - """ - server.unshelve() - - self.sleep_between(CONF.openstack. nova_server_unshelve_prepoll_delay) - utils.wait_for_status( - server, - ready_statuses=["ACTIVE"], - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.nova_server_unshelve_timeout, - check_interval=CONF.openstack.nova_server_unshelve_poll_interval - ) - - def _delete_server(self, server, force=False): - """Delete the given server. - - Returns when the server is actually deleted. - - :param server: Server object - :param force: If True, force_delete will be used instead of delete. - """ - atomic_name = ("nova.%sdelete_server") % (force and "force_" or "") - with atomic.ActionTimer(self, atomic_name): - if force: - server.force_delete() - else: - server.delete() - - utils.wait_for_status( - server, - ready_statuses=["deleted"], - check_deletion=True, - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.nova_server_delete_timeout, - check_interval=CONF.openstack.nova_server_delete_poll_interval - ) - - def _delete_servers(self, servers, force=False): - """Delete multiple servers. - - :param servers: A list of servers to delete - :param force: If True, force_delete will be used instead of delete. 
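(Illustrative aside: the `force and "force_" or ""` idiom in `_delete_server` above, and in `_delete_servers` below, picks the atomic action name at runtime. The same logic in modern conditional-expression form, shown only to make the resulting names explicit.)

    # Equivalent to the "force and 'force_' or ''" idiom used above.
    def delete_action_name(force):
        return "nova.%sdelete_server" % ("force_" if force else "")

    assert delete_action_name(False) == "nova.delete_server"
    assert delete_action_name(True) == "nova.force_delete_server"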
- """ - atomic_name = ("nova.%sdelete_servers") % (force and "force_" or "") - with atomic.ActionTimer(self, atomic_name): - for server in servers: - if force: - server.force_delete() - else: - server.delete() - - for server in servers: - utils.wait_for_status( - server, - ready_statuses=["deleted"], - check_deletion=True, - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.nova_server_delete_timeout, - check_interval=( - CONF.openstack.nova_server_delete_poll_interval) - ) - - @atomic.action_timer("nova.create_server_group") - def _create_server_group(self, **kwargs): - """Create (allocate) a server group. - - :param kwargs: Optional additional arguments for Server group creating - - :returns: Nova server group - """ - group_name = self.generate_random_name() - return self.clients("nova").server_groups.create(name=group_name, - **kwargs) - - @atomic.action_timer("nova.get_server_group") - def _get_server_group(self, id): - """Get a specific server group. - - :param id: Unique ID of the server group to get - - :rtype: :class:`ServerGroup` - """ - return self.clients("nova").server_groups.get(id) - - @atomic.action_timer("nova.list_server_groups") - def _list_server_groups(self, all_projects=False): - """Get a list of all server groups. - - :param all_projects: If True, display server groups from all - projects(Admin only) - - :rtype: list of :class:`ServerGroup`. - """ - if all_projects: - return self.admin_clients("nova").server_groups.list(all_projects) - else: - return self.clients("nova").server_groups.list(all_projects) - - @atomic.action_timer("nova.delete_server_group") - def _delete_server_group(self, group_id): - """Delete a specific server group. - - :param id: The ID of the :class:`ServerGroup` to delete - - :returns: An instance of novaclient.base.TupleWithMeta - """ - return self.clients("nova").server_groups.delete(group_id) - - @atomic.action_timer("nova.delete_image") - def _delete_image(self, image): - """Delete the given image. - - Returns when the image is actually deleted. - - :param image: Image object - """ - LOG.warning("Method '_delete_image' of NovaScenario class is " - "deprecated since Rally 0.10.0. Use GlanceUtils instead.") - glance = image_service.Image(self._clients, - atomic_inst=self.atomic_actions()) - glance.delete_image(image.id) - check_interval = CONF.openstack.nova_server_image_delete_poll_interval - with atomic.ActionTimer(self, "glance.wait_for_delete"): - utils.wait_for_status( - image, - ready_statuses=["deleted", "pending_delete"], - check_deletion=True, - update_resource=glance.get_image, - timeout=CONF.openstack.nova_server_image_delete_timeout, - check_interval=check_interval - ) - - @atomic.action_timer("nova.create_image") - def _create_image(self, server): - """Create an image from the given server - - Uses the server name to name the created image. Returns when the image - is actually created and is in the "Active" state. 
- - :param server: Server object for which the image will be created - - :returns: Created image object - """ - image_uuid = self.clients("nova").servers.create_image(server, - server.name) - glance = image_service.Image(self._clients, - atomic_inst=self.atomic_actions()) - image = glance.get_image(image_uuid) - check_interval = CONF.openstack.nova_server_image_create_poll_interval - with atomic.ActionTimer(self, "glance.wait_for_image"): - image = utils.wait_for_status( - image, - ready_statuses=["ACTIVE"], - update_resource=glance.get_image, - timeout=CONF.openstack.nova_server_image_create_timeout, - check_interval=check_interval - ) - return image - - @atomic.action_timer("nova.list_images") - def _list_images(self, detailed=False, **kwargs): - """List all images. - - :param detailed: True if the image listing - should contain detailed information - :param kwargs: Optional additional arguments for image listing - - :returns: Image list - """ - LOG.warning("Method '_delete_image' of NovaScenario class is " - "deprecated since Rally 0.10.0. Use GlanceUtils instead.") - glance = image_service.Image(self._clients, - atomic_inst=self.atomic_actions()) - return glance.list_images() - - @atomic.action_timer("nova.get_keypair") - def _get_keypair(self, keypair): - """Get a keypair. - - :param keypair: The ID of the keypair to get. - :rtype: :class:`Keypair` - """ - return self.clients("nova").keypairs.get(keypair) - - @atomic.action_timer("nova.create_keypair") - def _create_keypair(self, **kwargs): - """Create a keypair - - :returns: Created keypair name - """ - keypair_name = self.generate_random_name() - keypair = self.clients("nova").keypairs.create(keypair_name, **kwargs) - return keypair.name - - @atomic.action_timer("nova.list_keypairs") - def _list_keypairs(self): - """Return user keypairs list.""" - return self.clients("nova").keypairs.list() - - @atomic.action_timer("nova.delete_keypair") - def _delete_keypair(self, keypair_name): - """Delete keypair - - :param keypair_name: The keypair name to delete. - """ - self.clients("nova").keypairs.delete(keypair_name) - - @atomic.action_timer("nova.boot_servers") - def _boot_servers(self, image_id, flavor_id, requests, instances_amount=1, - auto_assign_nic=False, **kwargs): - """Boot multiple servers. - - Returns when all the servers are actually booted and are in the - "Active" state. - - :param image_id: ID of the image to be used for server creation - :param flavor_id: ID of the flavor to be used for server creation - :param requests: Number of booting requests to perform - :param instances_amount: Number of instances to boot per each request - :param auto_assign_nic: bool, whether or not to auto assign NICs - :param kwargs: other optional parameters to initialize the servers - - :returns: List of created server objects - """ - if auto_assign_nic and not kwargs.get("nics", False): - nic = self._pick_random_nic() - if nic: - kwargs["nics"] = nic - - name_prefix = self.generate_random_name() - for i in range(requests): - self.clients("nova").servers.create("%s_%d" % (name_prefix, i), - image_id, flavor_id, - min_count=instances_amount, - max_count=instances_amount, - **kwargs) - # NOTE(msdubov): Nova python client returns only one server even when - # min_count > 1, so we have to rediscover all the - # created servers manually. 
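(Illustrative aside: as the NOTE above explains, the Nova python client returns a single server object even when min_count > 1, so `_boot_servers` names every request with a shared random prefix and then re-lists everything that starts with it. A small sketch of that naming-and-rediscovery step, using plain strings in place of real server objects; all values are hypothetical.)

    name_prefix = "s_rally_abc123"
    requested = ["%s_%d" % (name_prefix, i) for i in range(3)]

    # "listing" stands in for what servers.list() might return afterwards.
    listing = requested + ["someone_elses_vm", "s_other_prefix_0"]
    rediscovered = [name for name in listing if name.startswith(name_prefix)]

    assert sorted(rediscovered) == sorted(requested)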
- servers = [s for s in self.clients("nova").servers.list() - if s.name.startswith(name_prefix)] - self.sleep_between(CONF.openstack.nova_server_boot_prepoll_delay) - servers = [utils.wait_for_status( - server, - ready_statuses=["ACTIVE"], - update_resource=utils. - get_from_manager(), - timeout=CONF.openstack.nova_server_boot_timeout, - check_interval=CONF.openstack.nova_server_boot_poll_interval - ) for server in servers] - return servers - - @atomic.action_timer("nova.associate_floating_ip") - def _associate_floating_ip(self, server, address, fixed_address=None): - """Add floating IP to an instance - - :param server: The :class:`Server` to add an IP to. - :param address: The dict-like representation of FloatingIP to add - to the instance - :param fixed_address: The fixedIP address the FloatingIP is to be - associated with (optional) - """ - with atomic.ActionTimer(self, "neutron.list_ports"): - ports = self.clients("neutron").list_ports(device_id=server.id) - port = ports["ports"][0] - - fip = address - if not isinstance(address, dict): - LOG.warning( - "The argument 'address' of " - "NovaScenario._associate_floating_ip method accepts a " - "dict-like representation of floating ip. Transmitting a " - "string with just an IP is deprecated.") - with atomic.ActionTimer(self, "neutron.list_floating_ips"): - all_fips = self.clients("neutron").list_floatingips( - tenant_id=self.context["tenant"]["id"]) - filtered_fip = [f for f in all_fips["floatingips"] - if f["floating_ip_address"] == address] - if not filtered_fip: - raise exceptions.NotFoundException( - "There is no floating ip with '%s' address." % address) - fip = filtered_fip[0] - # the first case: fip object is returned from network wrapper - # the second case: from neutronclient directly - fip_ip = fip.get("ip", fip.get("floating_ip_address", None)) - fip_update_dict = {"port_id": port["id"]} - if fixed_address: - fip_update_dict["fixed_ip_address"] = fixed_address - self.clients("neutron").update_floatingip( - fip["id"], {"floatingip": fip_update_dict} - ) - utils.wait_for(server, - is_ready=self.check_ip_address(fip_ip), - update_resource=utils.get_from_manager()) - # Update server data - server.addresses = server.manager.get(server.id).addresses - - @atomic.action_timer("nova.dissociate_floating_ip") - def _dissociate_floating_ip(self, server, address): - """Remove floating IP from an instance - - :param server: The :class:`Server` to add an IP to. - :param address: The dict-like representation of FloatingIP to remove - """ - fip = address - if not isinstance(fip, dict): - LOG.warning( - "The argument 'address' of " - "NovaScenario._dissociate_floating_ip method accepts a " - "dict-like representation of floating ip. Transmitting a " - "string with just an IP is deprecated.") - with atomic.ActionTimer(self, "neutron.list_floating_ips"): - all_fips = self.clients("neutron").list_floatingips( - tenant_id=self.context["tenant"]["id"] - ) - filtered_fip = [f for f in all_fips["floatingips"] - if f["floating_ip_address"] == address] - if not filtered_fip: - raise exceptions.NotFoundException( - "There is no floating ip with '%s' address." 
% address) - fip = filtered_fip[0] - self.clients("neutron").update_floatingip( - fip["id"], {"floatingip": {"port_id": None}} - ) - # the first case: fip object is returned from network wrapper - # the second case: from neutronclient directly - fip_ip = fip.get("ip", fip.get("floating_ip_address", None)) - utils.wait_for( - server, - is_ready=self.check_ip_address(fip_ip, must_exist=False), - update_resource=utils.get_from_manager() - ) - # Update server data - server.addresses = server.manager.get(server.id).addresses - - @staticmethod - def check_ip_address(address, must_exist=True): - ip_to_check = getattr(address, "ip", address) - - def _check_addr(resource): - for network, addr_list in resource.addresses.items(): - for addr in addr_list: - if ip_to_check == addr["addr"]: - return must_exist - return not must_exist - return _check_addr - - @atomic.action_timer("nova.resize") - def _resize(self, server, flavor): - server.resize(flavor) - utils.wait_for_status( - server, - ready_statuses=["VERIFY_RESIZE"], - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.nova_server_resize_timeout, - check_interval=CONF.openstack.nova_server_resize_poll_interval - ) - - @atomic.action_timer("nova.resize_confirm") - def _resize_confirm(self, server, status="ACTIVE"): - server.confirm_resize() - utils.wait_for_status( - server, - ready_statuses=[status], - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.nova_server_resize_confirm_timeout, - check_interval=( - CONF.openstack.nova_server_resize_confirm_poll_interval) - ) - - @atomic.action_timer("nova.resize_revert") - def _resize_revert(self, server, status="ACTIVE"): - server.revert_resize() - utils.wait_for_status( - server, - ready_statuses=[status], - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.nova_server_resize_revert_timeout, - check_interval=( - CONF.openstack.nova_server_resize_revert_poll_interval) - ) - - def _update_volume_resource(self, resource): - cinder_service = cinder_utils.CinderBasic(self.context) - return cinder_service.cinder.get_volume(resource.id) - - @atomic.action_timer("nova.attach_volume") - def _attach_volume(self, server, volume, device=None): - server_id = server.id - volume_id = volume.id - attachment = self.clients("nova").volumes.create_server_volume( - server_id, volume_id, device) - utils.wait_for_status( - volume, - ready_statuses=["in-use"], - update_resource=self._update_volume_resource, - timeout=CONF.openstack.nova_server_resize_revert_timeout, - check_interval=( - CONF.openstack.nova_server_resize_revert_poll_interval) - ) - return attachment - - @atomic.action_timer("nova.list_attachments") - def _list_attachments(self, server_id): - """Get a list of all the attached volumes for the given server ID. - - :param server_id: The ID of the server - :rtype: list of :class:`Volume` - """ - return self.clients("nova").volumes.get_server_volumes(server_id) - - @atomic.action_timer("nova.detach_volume") - def _detach_volume(self, server, volume, attachment=None): - """Detach volume from the server. - - :param server: A server object to detach volume from. - :param volume: A volume object to detach from the server. 
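(Illustrative aside: `check_ip_address` above builds a predicate that `wait_for` applies to a refreshed server. It scans server.addresses for the floating IP and returns True once the address appears, or, with must_exist=False, once it is gone. A toy usage sketch with a fake server object; FakeServer and its data are hypothetical.)

    class FakeServer:
        # Mimics the novaclient attribute the predicate inspects.
        addresses = {"private": [{"addr": "10.0.0.5"}, {"addr": "172.24.4.10"}]}

    def check_ip_address(address, must_exist=True):
        ip_to_check = getattr(address, "ip", address)

        def _check_addr(resource):
            for _network, addr_list in resource.addresses.items():
                for addr in addr_list:
                    if ip_to_check == addr["addr"]:
                        return must_exist
            return not must_exist
        return _check_addr

    srv = FakeServer()
    assert check_ip_address("172.24.4.10")(srv) is True              # attached
    assert check_ip_address("172.24.4.99")(srv) is False             # not yet attached
    assert check_ip_address("172.24.4.99", must_exist=False)(srv) is True  # confirmed gone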
- :param attachment: DEPRECATED - """ - if attachment: - LOG.warning("An argument `attachment` of `_detach_volume` is " - "deprecated in favor of `volume` argument since " - "Rally 0.10.0") - - server_id = server.id - - self.clients("nova").volumes.delete_server_volume(server_id, - volume.id) - utils.wait_for_status( - volume, - ready_statuses=["available"], - update_resource=self._update_volume_resource, - timeout=CONF.openstack.nova_detach_volume_timeout, - check_interval=CONF.openstack.nova_detach_volume_poll_interval - ) - - @atomic.action_timer("nova.live_migrate") - def _live_migrate(self, server, block_migration=False, - disk_over_commit=False, skip_host_check=False): - """Run live migration of the given server. - - :param server: Server object - :param block_migration: Specifies the migration type - :param disk_over_commit: Specifies whether to overcommit migrated - instance or not - :param skip_host_check: Specifies whether to verify the targeted host - availability - """ - server_admin = self.admin_clients("nova").servers.get(server.id) - host_pre_migrate = getattr(server_admin, "OS-EXT-SRV-ATTR:host") - server_admin.live_migrate(block_migration=block_migration, - disk_over_commit=disk_over_commit) - utils.wait_for_status( - server, - ready_statuses=["ACTIVE"], - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.nova_server_live_migrate_timeout, - check_interval=( - CONF.openstack.nova_server_live_migrate_poll_interval) - ) - server_admin = self.admin_clients("nova").servers.get(server.id) - if (host_pre_migrate == getattr(server_admin, "OS-EXT-SRV-ATTR:host") - and not skip_host_check): - raise exceptions.RallyException( - "Live Migration failed: Migration complete " - "but instance did not change host: %s" % host_pre_migrate) - - @atomic.action_timer("nova.migrate") - def _migrate(self, server, skip_host_check=False): - """Run migration of the given server. - - :param server: Server object - :param skip_host_check: Specifies whether to verify the targeted host - availability - """ - server_admin = self.admin_clients("nova").servers.get(server.id) - host_pre_migrate = getattr(server_admin, "OS-EXT-SRV-ATTR:host") - server_admin.migrate() - utils.wait_for_status( - server, - ready_statuses=["VERIFY_RESIZE"], - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.nova_server_migrate_timeout, - check_interval=( - CONF.openstack.nova_server_migrate_poll_interval) - ) - if not skip_host_check: - server_admin = self.admin_clients("nova").servers.get(server.id) - host_after_migrate = getattr(server_admin, "OS-EXT-SRV-ATTR:host") - if host_pre_migrate == host_after_migrate: - raise exceptions.RallyException( - "Migration failed: Migration complete but instance" - " did not change host: %s" % host_pre_migrate) - - @atomic.action_timer("nova.add_server_secgroups") - def _add_server_secgroups(self, server, security_group, - atomic_action=False): - """add security group to a server. - - :param server: Server object - :returns: An instance of novaclient.base.DictWithMeta - """ - return self.clients("nova").servers.add_security_group(server, - security_group) - - @atomic.action_timer("nova.list_hypervisors") - def _list_hypervisors(self, detailed=True): - """List hypervisors.""" - return self.admin_clients("nova").hypervisors.list(detailed) - - @atomic.action_timer("nova.statistics_hypervisors") - def _statistics_hypervisors(self): - """Get hypervisor statistics over all compute nodes. 
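(Illustrative aside: both `_live_migrate` and `_migrate` above verify success by comparing the admin-visible OS-EXT-SRV-ATTR:host attribute before and after the operation, unless skip_host_check is set. A stripped-down sketch of just that verification step; `get_host` is a hypothetical callable standing in for the admin novaclient lookup.)

    def assert_host_changed(server_id, host_before, get_host,
                            skip_host_check=False):
        """Raise if the instance stayed on the same compute host."""
        host_after = get_host(server_id)
        if not skip_host_check and host_before == host_after:
            raise RuntimeError(
                "Migration failed: instance did not change host: %s"
                % host_before)
        return host_after

    # Usage: pretend the scheduler moved the instance from cmp-1 to cmp-2.
    assert assert_host_changed("uuid-1", "cmp-1", lambda _id: "cmp-2") == "cmp-2"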
- - :returns: Hypervisor statistics - """ - return self.admin_clients("nova").hypervisors.statistics() - - @atomic.action_timer("nova.get_hypervisor") - def _get_hypervisor(self, hypervisor): - """Get a specific hypervisor. - - :param hypervisor: Hypervisor to get. - :returns: Hypervisor object - """ - return self.admin_clients("nova").hypervisors.get(hypervisor) - - @atomic.action_timer("nova.search_hypervisors") - def _search_hypervisors(self, hypervisor_match, servers=False): - """List all servers belonging to specific hypervisor. - - :param hypervisor_match: Hypervisor's host name. - :param servers: If True, server information is also retrieved. - :returns: Hypervisor object - """ - return self.admin_clients("nova").hypervisors.search(hypervisor_match, - servers=servers) - - @atomic.action_timer("nova.lock_server") - def _lock_server(self, server): - """Lock the given server. - - :param server: Server to lock - """ - server.lock() - - @atomic.action_timer("nova.uptime_hypervisor") - def _uptime_hypervisor(self, hypervisor): - """Display the uptime of the specified hypervisor. - - :param hypervisor: Hypervisor to get. - :returns: Hypervisor object - """ - return self.admin_clients("nova").hypervisors.uptime(hypervisor) - - @atomic.action_timer("nova.unlock_server") - def _unlock_server(self, server): - """Unlock the given server. - - :param server: Server to unlock - """ - server.unlock() - - @atomic.action_timer("nova.delete_network") - def _delete_network(self, net_id): - """Delete nova network. - - :param net_id: The nova-network ID to delete - """ - return self.admin_clients("nova").networks.delete(net_id) - - @atomic.action_timer("nova.list_flavors") - def _list_flavors(self, detailed=True, **kwargs): - """List all flavors. - - :param kwargs: Optional additional arguments for flavor listing - :param detailed: True if the image listing - should contain detailed information - :returns: flavors list - """ - return self.clients("nova").flavors.list(detailed, **kwargs) - - @atomic.action_timer("nova.set_flavor_keys") - def _set_flavor_keys(self, flavor, extra_specs): - """set flavor keys - - :param flavor: flavor to set keys - :param extra_specs: additional arguments for flavor set keys - """ - return flavor.set_keys(extra_specs) - - @atomic.action_timer("nova.list_agents") - def _list_agents(self, hypervisor=None): - """List all nova-agent builds. - - :param hypervisor: The nova-hypervisor ID on which we need to list all - the builds - :returns: Nova-agent build list - """ - return self.admin_clients("nova").agents.list(hypervisor) - - @atomic.action_timer("nova.list_aggregates") - def _list_aggregates(self): - """Returns list of all os-aggregates.""" - return self.admin_clients("nova").aggregates.list() - - @atomic.action_timer("nova.list_availability_zones") - def _list_availability_zones(self, detailed=True): - """List availability-zones. - - :param detailed: True if the availability-zone listing should contain - detailed information - :returns: Availability-zone list - """ - return self.admin_clients("nova").availability_zones.list(detailed) - - @atomic.action_timer("nova.list_interfaces") - def _list_interfaces(self, server): - """List interfaces attached to a server. - - :param server:Instance or ID of server. 
- :returns: Server interface list - """ - return self.clients("nova").servers.interface_list(server) - - @atomic.action_timer("nova.list_services") - def _list_services(self, host=None, binary=None): - """return all nova service details - - :param host: List all nova services on host - :param binary: List all nova services matching given binary - """ - return self.admin_clients("nova").services.list(host, binary) - - @atomic.action_timer("nova.create_flavor") - def _create_flavor(self, ram, vcpus, disk, **kwargs): - """Create a flavor - - :param ram: Memory in MB for the flavor - :param vcpus: Number of VCPUs for the flavor - :param disk: Size of local disk in GB - :param kwargs: Optional additional arguments for flavor creation - """ - name = self.generate_random_name() - return self.admin_clients("nova").flavors.create(name, ram, vcpus, - disk, **kwargs) - - @atomic.action_timer("nova.delete_flavor") - def _delete_flavor(self, flavor): - """Delete a flavor - - :param flavor: The ID of the :class:`Flavor` - :returns: An instance of novaclient.base.TupleWithMeta - """ - return self.admin_clients("nova").flavors.delete(flavor) - - @atomic.action_timer("nova.list_flavor_access") - def _list_flavor_access(self, flavor): - """List access-rules for non-public flavor. - - :param flavor: List access rules for flavor instance or flavor ID - """ - return self.admin_clients("nova").flavor_access.list(flavor=flavor) - - @atomic.action_timer("nova.add_tenant_access") - def _add_tenant_access(self, flavor, tenant): - """Add a tenant to the given flavor access list. - - :param flavor: name or id of the object flavor - :param tenant: id of the object tenant - :returns: access rules for flavor instance or flavor ID - """ - return self.admin_clients("nova").flavor_access.add_tenant_access( - flavor, tenant) - - @atomic.action_timer("nova.update_server") - def _update_server(self, server, description=None): - """update the server's name and description. - - :param server: Server object - :param description: update the server description - :returns: The updated server - """ - new_name = self.generate_random_name() - if description: - return server.update(name=new_name, - description=description) - else: - return server.update(name=new_name) - - @atomic.action_timer("nova.get_flavor") - def _get_flavor(self, flavor_id): - """Show a flavor - - :param flavor_id: The flavor ID to get - """ - return self.admin_clients("nova").flavors.get(flavor_id) - - @atomic.action_timer("nova.create_aggregate") - def _create_aggregate(self, availability_zone): - """Create a new aggregate. - - :param availability_zone: The availability zone of the aggregate - :returns: The created aggregate - """ - aggregate_name = self.generate_random_name() - return self.admin_clients("nova").aggregates.create(aggregate_name, - availability_zone) - - @atomic.action_timer("nova.get_aggregate_details") - def _get_aggregate_details(self, aggregate): - """Get details of the specified aggregate. - - :param aggregate: The aggregate to get details - :returns: Detailed information of aggregate - """ - return self.admin_clients("nova").aggregates.get_details(aggregate) - - @atomic.action_timer("nova.delete_aggregate") - def _delete_aggregate(self, aggregate): - """Delete the specified aggregate. 
- - :param aggregate: The aggregate to delete - :returns: An instance of novaclient.base.TupleWithMeta - """ - return self.admin_clients("nova").aggregates.delete(aggregate) - - @atomic.action_timer("nova.bind_actions") - def _bind_actions(self): - actions = ["hard_reboot", "soft_reboot", "stop_start", - "rescue_unrescue", "pause_unpause", "suspend_resume", - "lock_unlock", "shelve_unshelve"] - action_builder = utils.ActionBuilder(actions) - action_builder.bind_action("hard_reboot", self._reboot_server) - action_builder.bind_action("soft_reboot", self._soft_reboot_server) - action_builder.bind_action("stop_start", - self._stop_and_start_server) - action_builder.bind_action("rescue_unrescue", - self._rescue_and_unrescue_server) - action_builder.bind_action("pause_unpause", - self._pause_and_unpause_server) - action_builder.bind_action("suspend_resume", - self._suspend_and_resume_server) - action_builder.bind_action("lock_unlock", - self._lock_and_unlock_server) - action_builder.bind_action("shelve_unshelve", - self._shelve_and_unshelve_server) - - return action_builder - - @atomic.action_timer("nova.stop_and_start_server") - def _stop_and_start_server(self, server): - """Stop and then start the given server. - - A stop will be issued on the given server upon which time - this method will wait for the server to become 'SHUTOFF'. - Once the server is SHUTOFF a start will be issued and this - method will wait for the server to become 'ACTIVE' again. - - :param server: The server to stop and then start. - - """ - self._stop_server(server) - self._start_server(server) - - @atomic.action_timer("nova.rescue_and_unrescue_server") - def _rescue_and_unrescue_server(self, server): - """Rescue and then unrescue the given server. - - A rescue will be issued on the given server upon which time - this method will wait for the server to become 'RESCUE'. - Once the server is RESCUE an unrescue will be issued and - this method will wait for the server to become 'ACTIVE' - again. - - :param server: The server to rescue and then unrescue. - - """ - self._rescue_server(server) - self._unrescue_server(server) - - @atomic.action_timer("nova.pause_and_unpause_server") - def _pause_and_unpause_server(self, server): - """Pause and then unpause the given server. - - A pause will be issued on the given server upon which time - this method will wait for the server to become 'PAUSED'. - Once the server is PAUSED an unpause will be issued and - this method will wait for the server to become 'ACTIVE' - again. - - :param server: The server to pause and then unpause. - - """ - self._pause_server(server) - self._unpause_server(server) - - @atomic.action_timer("nova.suspend_and_resume_server") - def _suspend_and_resume_server(self, server): - """Suspend and then resume the given server. - - A suspend will be issued on the given server upon which time - this method will wait for the server to become 'SUSPENDED'. - Once the server is SUSPENDED an resume will be issued and - this method will wait for the server to become 'ACTIVE' - again. - - :param server: The server to suspend and then resume. - - """ - self._suspend_server(server) - self._resume_server(server) - - @atomic.action_timer("nova.lock_and_unlock_server") - def _lock_and_unlock_server(self, server): - """Lock and then unlock the given server. - - A lock will be issued on the given server upon which time - this method will wait for the server to become locked'. - Once the server is locked an unlock will be issued. 
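(Illustrative aside: `_bind_actions` above maps human-readable action names to the paired helpers defined next (stop/start, rescue/unrescue, and so on), so composite scenarios can pick operations by name from their configuration. A minimal illustration of that name-to-callable dispatch, using plain functions instead of Rally's ActionBuilder; the names and sequence are hypothetical.)

    # Simplified stand-in for the mapping built in _bind_actions.
    def stop_start(server):
        print("stop/start", server)

    def pause_unpause(server):
        print("pause/unpause", server)

    BOUND_ACTIONS = {
        "stop_start": stop_start,
        "pause_unpause": pause_unpause,
    }

    # A scenario could then run a configured sequence of named actions:
    for action_name in ["stop_start", "pause_unpause", "stop_start"]:
        BOUND_ACTIONS[action_name]("server-1")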
- - :param server: The server to lock and then unlock. - - """ - self._lock_server(server) - self._unlock_server(server) - - @atomic.action_timer("nova.shelve_and_unshelve_server") - def _shelve_and_unshelve_server(self, server): - """Shelve and then unshelve the given server. - - A shelve will be issued on the given server upon which time - this method will wait for the server to become 'SHELVED'. - Once the server is SHELVED an unshelve will be issued and - this method will wait for the server to become 'ACTIVE' - again. - - :param server: The server to shelve and then unshelve. - - """ - self._shelve_server(server) - self._unshelve_server(server) - - @atomic.action_timer("nova.update_aggregate") - def _update_aggregate(self, aggregate): - """Update the aggregate's name and availability_zone. - - :param aggregate: The aggregate to update - :return: The updated aggregate - """ - aggregate_name = self.generate_random_name() - availability_zone = self.generate_random_name() - values = {"name": aggregate_name, - "availability_zone": availability_zone} - return self.admin_clients("nova").aggregates.update(aggregate, - values) - - @atomic.action_timer("nova.aggregate_add_host") - def _aggregate_add_host(self, aggregate, host): - """Add a host into the Host Aggregate. - - :param aggregate: The aggregate add host to - :param host: The host add to aggregate - :returns: The aggregate that has been added host to - """ - return self.admin_clients("nova").aggregates.add_host(aggregate, - host) - - @atomic.action_timer("nova.aggregate_remove_host") - def _aggregate_remove_host(self, aggregate, host): - """Remove a host from an aggregate. - - :param aggregate: The aggregate remove host from - :param host: The host to remove - :returns: The aggregate that has been removed host from - """ - return self.admin_clients("nova").aggregates.remove_host(aggregate, - host) - - @atomic.action_timer("nova.aggregate_set_metadata") - def _aggregate_set_metadata(self, aggregate, metadata): - """Set metadata to an aggregate - - :param aggregate: The aggregate to set metadata to - :param metadata: The metadata to be set - :return: The aggregate that has the set metadata - """ - return self.admin_clients("nova").aggregates.set_metadata(aggregate, - metadata) - - @atomic.action_timer("nova.attach_interface") - def _attach_interface(self, server, port_id=None, - net_id=None, fixed_ip=None): - """Attach a network_interface to an instance. - - :param server: The :class:`Server` (or its ID) to attach to. - :param port_id: The port to attach. - :param network_id: the Network to attach - :param fixed_ip: the Fix_ip to attach - :returns the server that has attach interface - """ - return self.clients("nova").servers.interface_attach(server, - port_id, net_id, - fixed_ip) diff --git a/rally/plugins/openstack/scenarios/quotas/__init__.py b/rally/plugins/openstack/scenarios/quotas/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/scenarios/quotas/quotas.py b/rally/plugins/openstack/scenarios/quotas/quotas.py deleted file mode 100644 index 742eadc995..0000000000 --- a/rally/plugins/openstack/scenarios/quotas/quotas.py +++ /dev/null @@ -1,141 +0,0 @@ -# Copyright 2014: Kylin Cloud -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.quotas import utils -from rally.task import validation - -"""Scenarios for quotas.""" - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", - admin=True, users=True) -@scenario.configure(context={"admin_cleanup@openstack": ["nova.quotas"]}, - name="Quotas.nova_update", - platform="openstack") -class NovaUpdate(utils.QuotasScenario): - - def run(self, max_quota=1024): - """Update quotas for Nova. - - :param max_quota: Max value to be updated for quota. - """ - - self._update_quotas("nova", self.context["tenant"]["id"], - max_quota) - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", - admin=True, users=True) -@scenario.configure(context={"admin_cleanup@openstack": ["nova.quotas"]}, - name="Quotas.nova_update_and_delete", platform="openstack") -class NovaUpdateAndDelete(utils.QuotasScenario): - - def run(self, max_quota=1024): - """Update and delete quotas for Nova. - - :param max_quota: Max value to be updated for quota. - """ - - self._update_quotas("nova", self.context["tenant"]["id"], - max_quota) - self._delete_quotas("nova", self.context["tenant"]["id"]) - - -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", - admin=True, users=True) -@scenario.configure(context={"admin_cleanup@openstack": ["cinder.quotas"]}, - name="Quotas.cinder_update", platform="openstack") -class CinderUpdate(utils.QuotasScenario): - - def run(self, max_quota=1024): - """Update quotas for Cinder. - - :param max_quota: Max value to be updated for quota. - """ - - self._update_quotas("cinder", self.context["tenant"]["id"], - max_quota) - - -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", - admin=True, users=True) -@scenario.configure(context={"admin_cleanup@openstack": ["cinder.quotas"]}, - name="Quotas.cinder_get", platform="openstack") -class CinderGet(utils.QuotasScenario): - - def run(self): - """Get quotas for Cinder. - - Measure the "cinder quota-show" command performance - - """ - self._get_quotas("cinder", self.context["tenant"]["id"]) - - -@validation.add("required_services", services=[consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", - admin=True, users=True) -@scenario.configure(context={"admin_cleanup@openstack": ["cinder.quotas"]}, - name="Quotas.cinder_update_and_delete", - platform="openstack") -class CinderUpdateAndDelete(utils.QuotasScenario): - - def run(self, max_quota=1024): - """Update and Delete quotas for Cinder. - - :param max_quota: Max value to be updated for quota. 
- """ - - self._update_quotas("cinder", self.context["tenant"]["id"], - max_quota) - self._delete_quotas("cinder", self.context["tenant"]["id"]) - - -@validation.add("required_services", - services=[consts.Service.NEUTRON]) -@validation.add("required_platform", platform="openstack", - admin=True, users=True) -@scenario.configure(context={"admin_cleanup@openstack": ["neutron.quota"]}, - name="Quotas.neutron_update", platform="openstack") -class NeutronUpdate(utils.QuotasScenario): - - def run(self, max_quota=1024): - """Update quotas for neutron. - - :param max_quota: Max value to be updated for quota. - """ - - quota_update_fn = self.admin_clients("neutron").update_quota - self._update_quotas("neutron", self.context["tenant"]["id"], - max_quota, quota_update_fn) - - -@validation.add("required_services", services=[consts.Service.NOVA]) -@validation.add("required_platform", platform="openstack", - admin=True, users=True) -@scenario.configure(context={"admin_cleanup@openstack": ["nova.quotas"]}, - name="Quotas.nova_get", platform="openstack") -class NovaGet(utils.QuotasScenario): - - def run(self): - """Get quotas for nova.""" - - self._get_quotas("nova", self.context["tenant"]["id"]) diff --git a/rally/plugins/openstack/scenarios/quotas/utils.py b/rally/plugins/openstack/scenarios/quotas/utils.py deleted file mode 100644 index 5c687bdfb2..0000000000 --- a/rally/plugins/openstack/scenarios/quotas/utils.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright 2014: Kylin Cloud -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import random - -from rally.plugins.openstack import scenario -from rally.task import atomic - - -class QuotasScenario(scenario.OpenStackScenario): - """Base class for quotas scenarios with basic atomic actions.""" - - @atomic.action_timer("quotas.update_quotas") - def _update_quotas(self, component, tenant_id, max_quota=1024, - quota_update_fn=None): - """Updates quotas. - - :param component: Component for the quotas. - :param tenant_id: The project_id for the quotas to be updated. - :param max_quota: Max value to be updated for quota. - :param quota_update_fn: Client quota update function. - - Standard OpenStack clients use quotas.update(). - Use `quota_update_fn` to override for non-standard clients. - - :returns: Updated quotas dictionary. - """ - quotas = self._generate_quota_values(max_quota, component) - if quota_update_fn: - return quota_update_fn(tenant_id, **quotas) - return self.admin_clients(component).quotas.update(tenant_id, **quotas) - - @atomic.action_timer("quotas.delete_quotas") - def _delete_quotas(self, component, tenant_id): - """Delete quotas. - - :param component: Component for the quotas. - :param tenant_id: The project_id for the quotas to be updated. 
- """ - self.admin_clients(component).quotas.delete(tenant_id) - - def _generate_quota_values(self, max_quota, component): - quotas = {} - if component == "nova": - quotas = { - "metadata_items": random.randint(-1, max_quota), - "key_pairs": random.randint(-1, max_quota), - "injected_file_content_bytes": random.randint(-1, max_quota), - "injected_file_path_bytes": random.randint(-1, max_quota), - "ram": random.randint(-1, max_quota), - "instances": random.randint(-1, max_quota), - "injected_files": random.randint(-1, max_quota), - "cores": random.randint(-1, max_quota) - } - elif component == "cinder": - quotas = { - "volumes": random.randint(-1, max_quota), - "snapshots": random.randint(-1, max_quota), - "gigabytes": random.randint(-1, max_quota), - } - elif component == "neutron": - quota = {} - for key in ["network", "subnet", "port", "router", "floatingip", - "security_group", "security_group_rule"]: - quota[key] = random.randint(-1, max_quota) - quotas = {"body": {"quota": quota}} - return quotas - - @atomic.action_timer("quotas.get_quotas") - def _get_quotas(self, component, tenant_id): - """Get quotas for a project. - - :param component: Openstack component for the quotas. - :param tenant_id: The project_id for the quotas to show. - :return: Get quotas for a project. - """ - return self.admin_clients(component).quotas.get(tenant_id) diff --git a/rally/plugins/openstack/scenarios/sahara/__init__.py b/rally/plugins/openstack/scenarios/sahara/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/scenarios/sahara/clusters.py b/rally/plugins/openstack/scenarios/sahara/clusters.py deleted file mode 100644 index 08e8ba2e6c..0000000000 --- a/rally/plugins/openstack/scenarios/sahara/clusters.py +++ /dev/null @@ -1,231 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from rally.common import logging -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.sahara import utils -from rally.task import types -from rally.task import validation - -LOG = logging.getLogger(__name__) - -"""Scenarios for Sahara clusters.""" - - -@types.convert(flavor={"type": "nova_flavor"}, - master_flavor={"type": "nova_flavor"}, - worker_flavor={"type": "nova_flavor"}, - neutron_net={"type": "neutron_network"}, - floating_ip_pool={"type": "neutron_network"}) -@validation.add("flavor_exists", param_name="master_flavor") -@validation.add("flavor_exists", param_name="worker_flavor") -@validation.add("required_contexts", contexts=["users", "sahara_image"]) -@validation.add("number", param_name="workers_count", minval=1, - integer_only=True) -@validation.add("required_services", services=[consts.Service.SAHARA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["sahara"]}, - name="SaharaClusters.create_and_delete_cluster", - platform="openstack") -class CreateAndDeleteCluster(utils.SaharaScenario): - - def run(self, workers_count, plugin_name, hadoop_version, - master_flavor=None, worker_flavor=None, flavor=None, - floating_ip_pool=None, volumes_per_node=None, - volumes_size=None, auto_security_group=None, - security_groups=None, node_configs=None, - cluster_configs=None, enable_anti_affinity=False, - enable_proxy=False, use_autoconfig=True): - """Launch and delete a Sahara Cluster. - - This scenario launches a Hadoop cluster, waits until it becomes - 'Active' and deletes it. - - :param flavor: Nova flavor that will be for nodes in the - created node groups. Deprecated. - :param master_flavor: Nova flavor that will be used for the master - instance of the cluster - :param worker_flavor: Nova flavor that will be used for the workers of - the cluster - :param workers_count: number of worker instances in a cluster - :param plugin_name: name of a provisioning plugin - :param hadoop_version: version of Hadoop distribution supported by - the specified plugin. - :param floating_ip_pool: floating ip pool name from which Floating - IPs will be allocated. Sahara will determine - automatically how to treat this depending on - its own configurations. Defaults to None - because in some cases Sahara may work w/o - Floating IPs. - :param volumes_per_node: number of Cinder volumes that will be - attached to every cluster node - :param volumes_size: size of each Cinder volume in GB - :param auto_security_group: boolean value. If set to True Sahara will - create a Security Group for each Node Group - in the Cluster automatically. - :param security_groups: list of security groups that will be used - while creating VMs. If auto_security_group - is set to True, this list can be left empty. - :param node_configs: config dict that will be passed to each Node - Group - :param cluster_configs: config dict that will be passed to the - Cluster - :param enable_anti_affinity: If set to true the vms will be scheduled - one per compute node. - :param enable_proxy: Use Master Node of a Cluster as a Proxy node and - do not assign floating ips to workers. - :param use_autoconfig: If True, instances of the node group will be - automatically configured during cluster - creation. 
If False, the configuration values - should be specify manually - """ - - image_id = self.context["tenant"]["sahara"]["image"] - - LOG.debug("Using Image: %s" % image_id) - - cluster = self._launch_cluster( - flavor_id=flavor, - master_flavor_id=master_flavor, - worker_flavor_id=worker_flavor, - image_id=image_id, - workers_count=workers_count, - plugin_name=plugin_name, - hadoop_version=hadoop_version, - floating_ip_pool=floating_ip_pool, - volumes_per_node=volumes_per_node, - volumes_size=volumes_size, - auto_security_group=auto_security_group, - security_groups=security_groups, - node_configs=node_configs, - cluster_configs=cluster_configs, - enable_anti_affinity=enable_anti_affinity, - enable_proxy=enable_proxy, - use_autoconfig=use_autoconfig) - - self._delete_cluster(cluster) - - -@types.convert(flavor={"type": "nova_flavor"}, - master_flavor={"type": "nova_flavor"}, - worker_flavor={"type": "nova_flavor"}) -@validation.add("flavor_exists", param_name="master_flavor") -@validation.add("flavor_exists", param_name="worker_flavor") -@validation.add("required_services", services=[consts.Service.SAHARA]) -@validation.add("required_contexts", contexts=["users", "sahara_image"]) -@validation.add("number", param_name="workers_count", minval=1, - integer_only=True) -@scenario.configure(context={"cleanup@openstack": ["sahara"]}, - name="SaharaClusters.create_scale_delete_cluster", - platform="openstack") -class CreateScaleDeleteCluster(utils.SaharaScenario): - - def run(self, master_flavor, worker_flavor, workers_count, - plugin_name, hadoop_version, deltas, flavor=None, - floating_ip_pool=None, volumes_per_node=None, - volumes_size=None, auto_security_group=None, - security_groups=None, node_configs=None, - cluster_configs=None, enable_anti_affinity=False, - enable_proxy=False, use_autoconfig=True): - """Launch, scale and delete a Sahara Cluster. - - This scenario launches a Hadoop cluster, waits until it becomes - 'Active'. Then a series of scale operations is applied. The scaling - happens according to numbers listed in :param deltas. Ex. if - deltas is set to [2, -2] it means that the first scaling operation will - add 2 worker nodes to the cluster and the second will remove two. - - :param flavor: Nova flavor that will be for nodes in the - created node groups. Deprecated. - :param master_flavor: Nova flavor that will be used for the master - instance of the cluster - :param worker_flavor: Nova flavor that will be used for the workers of - the cluster - :param workers_count: number of worker instances in a cluster - :param plugin_name: name of a provisioning plugin - :param hadoop_version: version of Hadoop distribution supported by - the specified plugin. - :param deltas: list of integers which will be used to add or - remove worker nodes from the cluster - :param floating_ip_pool: floating ip pool name from which Floating - IPs will be allocated. Sahara will determine - automatically how to treat this depending on - its own configurations. Defaults to None - because in some cases Sahara may work w/o - Floating IPs. - :param neutron_net_id: id of a Neutron network that will be used - for fixed IPs. This parameter is ignored when - Nova Network is set up. - :param volumes_per_node: number of Cinder volumes that will be - attached to every cluster node - :param volumes_size: size of each Cinder volume in GB - :param auto_security_group: boolean value. If set to True Sahara will - create a Security Group for each Node Group - in the Cluster automatically. 
- :param security_groups: list of security groups that will be used - while creating VMs. If auto_security_group - is set to True this list can be left empty. - :param node_configs: configs dict that will be passed to each Node - Group - :param cluster_configs: configs dict that will be passed to the - Cluster - :param enable_anti_affinity: If set to true the vms will be scheduled - one per compute node. - :param enable_proxy: Use Master Node of a Cluster as a Proxy node and - do not assign floating ips to workers. - :param use_autoconfig: If True, instances of the node group will be - automatically configured during cluster - creation. If False, the configuration values - should be specify manually - """ - - image_id = self.context["tenant"]["sahara"]["image"] - - LOG.debug("Using Image: %s" % image_id) - - cluster = self._launch_cluster( - flavor_id=flavor, - master_flavor_id=master_flavor, - worker_flavor_id=worker_flavor, - image_id=image_id, - workers_count=workers_count, - plugin_name=plugin_name, - hadoop_version=hadoop_version, - floating_ip_pool=floating_ip_pool, - volumes_per_node=volumes_per_node, - volumes_size=volumes_size, - auto_security_group=auto_security_group, - security_groups=security_groups, - node_configs=node_configs, - cluster_configs=cluster_configs, - enable_anti_affinity=enable_anti_affinity, - enable_proxy=enable_proxy, - use_autoconfig=use_autoconfig) - - for delta in deltas: - # The Cluster is fetched every time so that its node groups have - # correct 'count' values. - cluster = self.clients("sahara").clusters.get(cluster.id) - - if delta == 0: - # Zero scaling makes no sense. - continue - elif delta > 0: - self._scale_cluster_up(cluster, delta) - elif delta < 0: - self._scale_cluster_down(cluster, delta) - - self._delete_cluster(cluster) diff --git a/rally/plugins/openstack/scenarios/sahara/consts.py b/rally/plugins/openstack/scenarios/sahara/consts.py deleted file mode 100644 index c05c3e869e..0000000000 --- a/rally/plugins/openstack/scenarios/sahara/consts.py +++ /dev/null @@ -1,249 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
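(Illustrative aside: the delta loop in CreateScaleDeleteCluster above refetches the cluster before each step, skips zero entries, and dispatches to scale-up or scale-down based on sign. The same control flow in isolation, with print stubs in place of the Sahara scale helpers; the names below are placeholders, not the original methods.)

    def apply_deltas(deltas, scale_up, scale_down):
        """Dispatch each non-zero delta to the appropriate scaling callable."""
        for delta in deltas:
            if delta == 0:
                continue            # zero scaling makes no sense
            elif delta > 0:
                scale_up(delta)
            else:
                scale_down(delta)

    # Example from the docstring above: [2, -2] adds two workers, then removes two.
    apply_deltas([2, 0, -2],
                 scale_up=lambda d: print("adding %d workers" % d),
                 scale_down=lambda d: print("removing %d workers" % abs(d)))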
- -NODE_PROCESSES = { - "vanilla": { - "1.2.1": { - "master": ["namenode", "jobtracker", "oozie"], - "worker": ["datanode", "tasktracker"] - }, - "2.3.0": { - "master": ["namenode", "resourcemanager", "historyserver", - "oozie"], - "worker": ["datanode", "nodemanager"] - }, - "2.4.1": { - "master": ["namenode", "resourcemanager", "historyserver", - "oozie"], - "worker": ["datanode", "nodemanager"] - }, - "2.6.0": { - "master": ["namenode", "resourcemanager", "historyserver", - "oozie"], - "worker": ["datanode", "nodemanager"] - }, - "2.7.1": { - "master": ["namenode", "resourcemanager", "historyserver", - "oozie"], - "worker": ["datanode", "nodemanager"] - } - }, - "hdp": { - "1.3.2": { - "master": ["JOBTRACKER", "NAMENODE", "SECONDARY_NAMENODE", - "GANGLIA_SERVER", "NAGIOS_SERVER", - "AMBARI_SERVER", "OOZIE_SERVER"], - "worker": ["TASKTRACKER", "DATANODE", "HDFS_CLIENT", - "MAPREDUCE_CLIENT", "OOZIE_CLIENT", "PIG"] - }, - "2.0.6": { - "manager": ["AMBARI_SERVER", "GANGLIA_SERVER", - "NAGIOS_SERVER"], - "master": ["NAMENODE", "SECONDARY_NAMENODE", - "ZOOKEEPER_SERVER", "ZOOKEEPER_CLIENT", - "HISTORYSERVER", "RESOURCEMANAGER", - "OOZIE_SERVER"], - "worker": ["DATANODE", "HDFS_CLIENT", "ZOOKEEPER_CLIENT", - "PIG", "MAPREDUCE2_CLIENT", "YARN_CLIENT", - "NODEMANAGER", "OOZIE_CLIENT"] - }, - "2.2": { - "manager": ["AMBARI_SERVER", "GANGLIA_SERVER", - "NAGIOS_SERVER"], - "master": ["NAMENODE", "SECONDARY_NAMENODE", - "ZOOKEEPER_SERVER", "ZOOKEEPER_CLIENT", - "HISTORYSERVER", "RESOURCEMANAGER", - "OOZIE_SERVER"], - "worker": ["DATANODE", "HDFS_CLIENT", "ZOOKEEPER_CLIENT", - "PIG", "MAPREDUCE2_CLIENT", "YARN_CLIENT", - "NODEMANAGER", "OOZIE_CLIENT", "TEZ_CLIENT"] - } - }, - "cdh": { - "5": { - "manager": ["CLOUDERA_MANAGER"], - "master": ["HDFS_NAMENODE", "YARN_RESOURCEMANAGER", - "OOZIE_SERVER", "YARN_JOBHISTORY", - "HDFS_SECONDARYNAMENODE", "HIVE_METASTORE", - "HIVE_SERVER2"], - "worker": ["YARN_NODEMANAGER", "HDFS_DATANODE"] - }, - "5.4.0": { - "manager": ["CLOUDERA_MANAGER"], - "master": ["HDFS_NAMENODE", "YARN_RESOURCEMANAGER", - "OOZIE_SERVER", "YARN_JOBHISTORY", - "HDFS_SECONDARYNAMENODE", "HIVE_METASTORE", - "HIVE_SERVER2"], - "worker": ["YARN_NODEMANAGER", "HDFS_DATANODE"] - }, - "5.5.0": { - "manager": ["CLOUDERA_MANAGER"], - "master": ["HDFS_NAMENODE", "YARN_RESOURCEMANAGER", - "OOZIE_SERVER", "YARN_JOBHISTORY", - "HDFS_SECONDARYNAMENODE", "HIVE_METASTORE", - "HIVE_SERVER2"], - "worker": ["YARN_NODEMANAGER", "HDFS_DATANODE"] - } - }, - "spark": { - "1.3.1": { - "master": ["namenode", "master"], - "worker": ["datanode", "slave"] - }, - "1.6.0": { - "master": ["namenode", "master"], - "worker": ["datanode", "slave"] - } - }, - "ambari": { - "2.3": { - "master-edp": ["Hive Metastore", "HiveServer", "Oozie"], - "master": ["Ambari", "MapReduce History Server", - "Spark History Server", "NameNode", "ResourceManager", - "SecondaryNameNode", "YARN Timeline Server", - "ZooKeeper"], - "worker": ["DataNode", "NodeManager"] - } - }, - "mapr": { - "5.0.0.mrv2": { - "master": ["Metrics", "Webserver", "Zookeeper", "HTTPFS", - "Oozie", "FileServer", "CLDB", "Flume", "Hue", - "NodeManager", "HistoryServer", "ResourseManager", - "HiveServer2", "HiveMetastore", "Sqoop2-Client", - "Sqoop2-Server"], - "worker": ["NodeManager", "FileServer"] - }, - "5.1.0.mrv2": { - "master": ["Metrics", "Webserver", "Zookeeper", "HTTPFS", - "Oozie", "FileServer", "CLDB", "Flume", "Hue", - "NodeManager", "HistoryServer", "ResourseManager", - "HiveServer2", "HiveMetastore", "Sqoop2-Client", - "Sqoop2-Server"], - "worker": 
["NodeManager", "FileServer"] - } - } -} - -REPLICATION_CONFIGS = { - "vanilla": { - "1.2.1": { - "target": "HDFS", - "config_name": "dfs.replication" - }, - "2.3.0": { - "target": "HDFS", - "config_name": "dfs.replication" - }, - "2.4.1": { - "target": "HDFS", - "config_name": "dfs.replication" - }, - "2.6.0": { - "target": "HDFS", - "config_name": "dfs.replication" - }, - "2.7.1": { - "target": "HDFS", - "config_name": "dfs.replication" - } - }, - "hdp": { - "1.3.2": { - "target": "HDFS", - "config_name": "dfs.replication" - }, - "2.0.6": { - "target": "HDFS", - "config_name": "dfs.replication" - }, - "2.2": { - "target": "HDFS", - "config_name": "dfs.replication" - } - }, - "cdh": { - "5": { - "target": "HDFS", - "config_name": "dfs_replication" - }, - "5.4.0": { - "target": "HDFS", - "config_name": "dfs_replication" - }, - "5.5.0": { - "target": "HDFS", - "config_name": "dfs_replication" - } - }, - "spark": { - "1.3.1": { - "target": "HDFS", - "config_name": "dfs_replication" - }, - "1.6.0": { - "target": "HDFS", - "config_name": "dfs_replication" - } - }, - "ambari": { - "2.3": { - "target": "HDFS", - "config_name": "dfs_replication" - } - }, - "mapr": { - "5.0.0.mrv2": { - "target": "HDFS", - "config_name": "dfs.replication" - }, - "5.1.0.mrv2": { - "target": "HDFS", - "config_name": "dfs.replication" - } - } - -} - -ANTI_AFFINITY_PROCESSES = { - "vanilla": { - "1.2.1": ["datanode"], - "2.3.0": ["datanode"], - "2.4.1": ["datanode"], - "2.6.0": ["datanode"], - "2.7.1": ["datanode"] - }, - "hdp": { - "1.3.2": ["DATANODE"], - "2.0.6": ["DATANODE"], - "2.2": ["DATANODE"] - }, - "cdh": { - "5": ["HDFS_DATANODE"], - "5.4.0": ["HDFS_DATANODE"], - "5.5.0": ["HDFS_DATANODE"] - }, - "spark": { - "1.3.1": ["datanode"], - "1.6.0": ["datanode"] - }, - "ambari": { - "2.3": ["DataNode"], - }, - "mapr": { - "5.0.0.mrv2": ["FileServer"], - "5.1.0.mrv2": ["FileServer"], - } -} diff --git a/rally/plugins/openstack/scenarios/sahara/jobs.py b/rally/plugins/openstack/scenarios/sahara/jobs.py deleted file mode 100644 index dc9d7dc6c2..0000000000 --- a/rally/plugins/openstack/scenarios/sahara/jobs.py +++ /dev/null @@ -1,141 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import logging -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.sahara import utils -from rally.task import validation - -LOG = logging.getLogger(__name__) - - -@validation.add("required_services", services=[consts.Service.SAHARA]) -@validation.add("required_contexts", contexts=["users", "sahara_image", - "sahara_job_binaries", - "sahara_cluster"]) -@scenario.configure(context={"cleanup@openstack": ["sahara"]}, - name="SaharaJob.create_launch_job", - platform="openstack") -class CreateLaunchJob(utils.SaharaScenario): - - def run(self, job_type, configs, job_idx=0): - """Create and execute a Sahara EDP Job. - - This scenario Creates a Job entity and launches an execution on a - Cluster. 
- - :param job_type: type of the Data Processing Job - :param configs: config dict that will be passed to a Job Execution - :param job_idx: index of a job in a sequence. This index will be - used to create different atomic actions for each job - in a sequence - """ - - mains = self.context["tenant"]["sahara"]["mains"] - libs = self.context["tenant"]["sahara"]["libs"] - - name = self.generate_random_name() - job = self.clients("sahara").jobs.create(name=name, - type=job_type, - description="", - mains=mains, - libs=libs) - - cluster_id = self.context["tenant"]["sahara"]["cluster"] - - if job_type.lower() == "java": - input_id = None - output_id = None - else: - input_id = self.context["tenant"]["sahara"]["input"] - output_id = self._create_output_ds().id - - self._run_job_execution(job_id=job.id, - cluster_id=cluster_id, - input_id=input_id, - output_id=output_id, - configs=configs, - job_idx=job_idx) - - -@validation.add("required_services", services=[consts.Service.SAHARA]) -@validation.add("required_contexts", contexts=["users", "sahara_image", - "sahara_job_binaries", - "sahara_cluster"]) -@scenario.configure(context={"cleanup@openstack": ["sahara"]}, - name="SaharaJob.create_launch_job_sequence", - platform="openstack") -class CreateLaunchJobSequence(utils.SaharaScenario): - - def run(self, jobs): - """Create and execute a sequence of the Sahara EDP Jobs. - - This scenario Creates a Job entity and launches an execution on a - Cluster for every job object provided. - - :param jobs: list of jobs that should be executed in one context - """ - - launch_job = CreateLaunchJob(self.context) - - for idx, job in enumerate(jobs): - LOG.debug("Launching Job. Sequence #%d" % idx) - launch_job.run(job["job_type"], job["configs"], idx) - - -@validation.add("required_services", services=[consts.Service.SAHARA]) -@validation.add("required_contexts", contexts=["users", "sahara_image", - "sahara_job_binaries", - "sahara_cluster"]) -@scenario.configure(context={"cleanup@openstack": ["sahara"]}, - name="SaharaJob.create_launch_job_sequence_with_scaling", - platform="openstack") -class CreateLaunchJobSequenceWithScaling(utils.SaharaScenario,): - - def run(self, jobs, deltas): - """Create and execute Sahara EDP Jobs on a scaling Cluster. - - This scenario Creates a Job entity and launches an execution on a - Cluster for every job object provided. The Cluster is scaled according - to the deltas values and the sequence is launched again. - - :param jobs: list of jobs that should be executed in one context - :param deltas: list of integers which will be used to add or - remove worker nodes from the cluster - """ - - cluster_id = self.context["tenant"]["sahara"]["cluster"] - - launch_job_sequence = CreateLaunchJobSequence(self.context) - launch_job_sequence.run(jobs) - - for delta in deltas: - # The Cluster is fetched every time so that its node groups have - # correct 'count' values. - cluster = self.clients("sahara").clusters.get(cluster_id) - - LOG.debug("Scaling cluster %s with delta %d" - % (cluster.name, delta)) - if delta == 0: - # Zero scaling makes no sense. 
- continue - elif delta > 0: - self._scale_cluster_up(cluster, delta) - elif delta < 0: - self._scale_cluster_down(cluster, delta) - - LOG.debug("Starting Job sequence") - launch_job_sequence.run(jobs) diff --git a/rally/plugins/openstack/scenarios/sahara/node_group_templates.py b/rally/plugins/openstack/scenarios/sahara/node_group_templates.py deleted file mode 100644 index ceb3d6bc83..0000000000 --- a/rally/plugins/openstack/scenarios/sahara/node_group_templates.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.sahara import utils -from rally.task import types -from rally.task import validation - -"""Scenarios for Sahara node group templates.""" - - -@types.convert(flavor={"type": "nova_flavor"}) -@validation.add("flavor_exists", param_name="flavor") -@validation.add("required_services", services=[consts.Service.SAHARA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure( - context={"cleanup@openstack": ["sahara"]}, - name="SaharaNodeGroupTemplates.create_and_list_node_group_templates", - platform="openstack") -class CreateAndListNodeGroupTemplates(utils.SaharaScenario): - - def run(self, flavor, plugin_name="vanilla", - hadoop_version="1.2.1", use_autoconfig=True): - """Create and list Sahara Node Group Templates. - - This scenario creates two Node Group Templates with different set of - node processes. The master Node Group Template contains Hadoop's - management processes. The worker Node Group Template contains - Hadoop's worker processes. - - By default the templates are created for the vanilla Hadoop - provisioning plugin using the version 1.2.1 - - After the templates are created the list operation is called. - - :param flavor: Nova flavor that will be for nodes in the - created node groups - :param plugin_name: name of a provisioning plugin - :param hadoop_version: version of Hadoop distribution supported by - the specified plugin. - :param use_autoconfig: If True, instances of the node group will be - automatically configured during cluster - creation. 
If False, the configuration values - should be specify manually - """ - - self._create_master_node_group_template(flavor_id=flavor, - plugin_name=plugin_name, - hadoop_version=hadoop_version, - use_autoconfig=use_autoconfig) - self._create_worker_node_group_template(flavor_id=flavor, - plugin_name=plugin_name, - hadoop_version=hadoop_version, - use_autoconfig=use_autoconfig) - self._list_node_group_templates() - - -@types.convert(flavor={"type": "nova_flavor"}) -@validation.add("flavor_exists", param_name="flavor") -@validation.add("required_services", services=[consts.Service.SAHARA]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure( - context={"cleanup@openstack": ["sahara"]}, - name="SaharaNodeGroupTemplates.create_delete_node_group_templates", - platform="openstack") -class CreateDeleteNodeGroupTemplates(utils.SaharaScenario): - - def run(self, flavor, plugin_name="vanilla", - hadoop_version="1.2.1", use_autoconfig=True): - """Create and delete Sahara Node Group Templates. - - This scenario creates and deletes two most common types of - Node Group Templates. - - By default the templates are created for the vanilla Hadoop - provisioning plugin using the version 1.2.1 - - :param flavor: Nova flavor that will be for nodes in the - created node groups - :param plugin_name: name of a provisioning plugin - :param hadoop_version: version of Hadoop distribution supported by - the specified plugin. - :param use_autoconfig: If True, instances of the node group will be - automatically configured during cluster - creation. If False, the configuration values - should be specify manually - """ - - master_ngt = self._create_master_node_group_template( - flavor_id=flavor, - plugin_name=plugin_name, - hadoop_version=hadoop_version, - use_autoconfig=use_autoconfig) - - worker_ngt = self._create_worker_node_group_template( - flavor_id=flavor, - plugin_name=plugin_name, - hadoop_version=hadoop_version, - use_autoconfig=use_autoconfig) - - self._delete_node_group_template(master_ngt) - self._delete_node_group_template(worker_ngt) diff --git a/rally/plugins/openstack/scenarios/sahara/utils.py b/rally/plugins/openstack/scenarios/sahara/utils.py deleted file mode 100644 index 3f150f8d7c..0000000000 --- a/rally/plugins/openstack/scenarios/sahara/utils.py +++ /dev/null @@ -1,588 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import random - -from saharaclient.api import base as sahara_base - -from rally.common import cfg -from rally.common import logging -from rally.common import utils as rutils -from rally import consts -from rally import exceptions -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.sahara import consts as sahara_consts -from rally.task import atomic -from rally.task import utils -from rally.utils import strutils - - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - - -class SaharaScenario(scenario.OpenStackScenario): - """Base class for Sahara scenarios with basic atomic actions.""" - - # NOTE(sskripnick): Some sahara resource names are validated as hostnames. - # Since underscores are not allowed in hostnames we should not use them. - RESOURCE_NAME_FORMAT = "rally-sahara-XXXXXX-XXXXXXXXXXXXXXXX" - - @atomic.action_timer("sahara.list_node_group_templates") - def _list_node_group_templates(self): - """Return user Node Group Templates list.""" - return self.clients("sahara").node_group_templates.list() - - @atomic.action_timer("sahara.create_master_node_group_template") - def _create_master_node_group_template(self, flavor_id, plugin_name, - hadoop_version, - use_autoconfig=True): - """Create a master Node Group Template with a random name. - - :param flavor_id: The required argument for the Template - :param plugin_name: Sahara provisioning plugin name - :param hadoop_version: The version of Hadoop distribution supported by - the plugin - :param use_autoconfig: If True, instances of the node group will be - automatically configured during cluster - creation. If False, the configuration values - should be specify manually - :returns: The created Template - """ - name = self.generate_random_name() - - return self.clients("sahara").node_group_templates.create( - name=name, - plugin_name=plugin_name, - hadoop_version=hadoop_version, - flavor_id=flavor_id, - node_processes=sahara_consts.NODE_PROCESSES[plugin_name] - [hadoop_version]["master"], - use_autoconfig=use_autoconfig) - - @atomic.action_timer("sahara.create_worker_node_group_template") - def _create_worker_node_group_template(self, flavor_id, plugin_name, - hadoop_version, use_autoconfig): - """Create a worker Node Group Template with a random name. - - :param flavor_id: The required argument for the Template - :param plugin_name: Sahara provisioning plugin name - :param hadoop_version: The version of Hadoop distribution supported by - the plugin - :param use_autoconfig: If True, instances of the node group will be - automatically configured during cluster - creation. If False, the configuration values - should be specify manually - :returns: The created Template - """ - name = self.generate_random_name() - - return self.clients("sahara").node_group_templates.create( - name=name, - plugin_name=plugin_name, - hadoop_version=hadoop_version, - flavor_id=flavor_id, - node_processes=sahara_consts.NODE_PROCESSES[plugin_name] - [hadoop_version]["worker"], - use_autoconfig=use_autoconfig) - - @atomic.action_timer("sahara.delete_node_group_template") - def _delete_node_group_template(self, node_group): - """Delete a Node Group Template by id. 
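Editor's note: the RESOURCE_NAME_FORMAT comment above explains that Sahara validates some resource names as hostnames, so underscores must be avoided. A hypothetical sketch of a name generator honouring the "rally-sahara-XXXXXX-XXXXXXXXXXXXXXXX" template is shown below; Rally's real generator lives elsewhere in the framework, this is only an illustration of the constraint.

import random
import string

RESOURCE_NAME_FORMAT = "rally-sahara-XXXXXX-XXXXXXXXXXXXXXXX"
_CHARS = string.ascii_lowercase + string.digits  # hostname-safe characters only

def generate_name(fmt=RESOURCE_NAME_FORMAT):
    """Replace each X placeholder with a random hostname-safe character."""
    return "".join(random.choice(_CHARS) if c == "X" else c for c in fmt)

name = generate_name()
assert "_" not in name and name.startswith("rally-sahara-")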
- - :param node_group: The Node Group Template to be deleted - """ - self.clients("sahara").node_group_templates.delete(node_group.id) - - def _wait_active(self, cluster_object): - utils.wait_for_status( - resource=cluster_object, ready_statuses=["active"], - failure_statuses=["error"], update_resource=self._update_cluster, - timeout=CONF.openstack.sahara_cluster_create_timeout, - check_interval=CONF.openstack.sahara_cluster_check_interval) - - def _setup_neutron_floating_ip_pool(self, name_or_id): - if name_or_id: - if strutils.is_uuid_like(name_or_id): - # Looks like an id is provided Return as is. - return name_or_id - else: - # It's a name. Changing to id. - for net in self.clients("neutron").list_networks()["networks"]: - if net["name"] == name_or_id: - return net["id"] - # If the name is not found in the list. Exit with error. - raise exceptions.ContextSetupFailure( - ctx_name=self.get_name(), - msg="Could not resolve Floating IP Pool name %s to id" - % name_or_id) - else: - # Pool is not provided. Using the one set as GW for current router. - - net = self.context["tenant"]["networks"][0] - router_id = net["router_id"] - router = self.clients("neutron").show_router(router_id)["router"] - net_id = router["external_gateway_info"]["network_id"] - - return net_id - - def _setup_nova_floating_ip_pool(self, name): - if name: - # The name is provided returning it as is. - return name - else: - # The name is not provided. Discovering - LOG.debug("No Floating Ip Pool provided. Taking random.") - pools = self.clients("nova").floating_ip_pools.list() - - if pools: - return random.choice(pools).name - else: - LOG.warning("No Floating Ip Pools found. This may cause " - "instances to be unreachable.") - return None - - def _setup_floating_ip_pool(self, node_groups, floating_ip_pool, - enable_proxy): - if consts.Service.NEUTRON in self.clients("services").values(): - LOG.debug("Neutron detected as networking backend.") - floating_ip_pool_value = self._setup_neutron_floating_ip_pool( - floating_ip_pool) - else: - LOG.debug("Nova Network detected as networking backend.") - floating_ip_pool_value = self._setup_nova_floating_ip_pool( - floating_ip_pool) - - if floating_ip_pool_value: - LOG.debug("Using floating ip pool %s." % floating_ip_pool_value) - # If the pool is set by any means assign it to all node groups. - # If the proxy node feature is enabled, Master Node Group and - # Proxy Workers should have a floating ip pool set up - - if enable_proxy: - proxy_groups = [x for x in node_groups - if x["name"] in ("master-ng", "proxy-ng")] - for ng in proxy_groups: - ng["is_proxy_gateway"] = True - ng["floating_ip_pool"] = floating_ip_pool_value - else: - for ng in node_groups: - ng["floating_ip_pool"] = floating_ip_pool_value - - return node_groups - - def _setup_volumes(self, node_groups, volumes_per_node, volumes_size): - if volumes_per_node: - LOG.debug("Adding volumes config to Node Groups") - for ng in node_groups: - ng_name = ng["name"] - if "worker" in ng_name or "proxy" in ng_name: - # NOTE: Volume storage is used only by HDFS Datanode - # process which runs on workers and proxies. - - ng["volumes_per_node"] = volumes_per_node - ng["volumes_size"] = volumes_size - - return node_groups - - def _setup_security_groups(self, node_groups, auto_security_group, - security_groups): - if auto_security_group: - LOG.debug("Auto security group enabled. 
Adding to Node Groups.") - if security_groups: - LOG.debug("Adding provided Security Groups to Node Groups.") - - for ng in node_groups: - if auto_security_group: - ng["auto_security_group"] = auto_security_group - if security_groups: - ng["security_groups"] = security_groups - - return node_groups - - def _setup_node_configs(self, node_groups, node_configs): - if node_configs: - LOG.debug("Adding Hadoop configs to Node Groups") - for ng in node_groups: - ng["node_configs"] = node_configs - - return node_groups - - def _setup_node_autoconfig(self, node_groups, node_autoconfig): - LOG.debug("Adding auto-config par to Node Groups") - for ng in node_groups: - ng["use_autoconfig"] = node_autoconfig - - return node_groups - - def _setup_replication_config(self, hadoop_version, workers_count, - plugin_name): - replication_value = min(workers_count, 3) - # 3 is a default Hadoop replication - conf = sahara_consts.REPLICATION_CONFIGS[plugin_name][hadoop_version] - LOG.debug("Using replication factor: %s" % replication_value) - replication_config = { - conf["target"]: { - conf["config_name"]: replication_value - } - } - return replication_config - - @logging.log_deprecated_args("`flavor_id` argument is deprecated. Use " - "`master_flavor_id` and `worker_flavor_id` " - "parameters.", rally_version="2.0", - deprecated_args=["flavor_id"]) - @atomic.action_timer("sahara.launch_cluster") - def _launch_cluster(self, plugin_name, hadoop_version, master_flavor_id, - worker_flavor_id, image_id, workers_count, - flavor_id=None, - floating_ip_pool=None, volumes_per_node=None, - volumes_size=None, auto_security_group=None, - security_groups=None, node_configs=None, - cluster_configs=None, enable_anti_affinity=False, - enable_proxy=False, - wait_active=True, - use_autoconfig=True): - """Create a cluster and wait until it becomes Active. - - The cluster is created with two node groups. The master Node Group is - created with one instance. The worker node group contains - node_count - 1 instances. - - :param plugin_name: provisioning plugin name - :param hadoop_version: Hadoop version supported by the plugin - :param master_flavor_id: flavor which will be used to create master - instance - :param worker_flavor_id: flavor which will be used to create workers - :param image_id: image id that will be used to boot instances - :param workers_count: number of worker instances. All plugins will - also add one Master instance and some plugins - add a Manager instance. - :param floating_ip_pool: floating ip pool name from which Floating - IPs will be allocated - :param volumes_per_node: number of Cinder volumes that will be - attached to every cluster node - :param volumes_size: size of each Cinder volume in GB - :param auto_security_group: boolean value. If set to True Sahara will - create a Security Group for each Node Group - in the Cluster automatically. - :param security_groups: list of security groups that will be used - while creating VMs. If auto_security_group is - set to True, this list can be left empty. - :param node_configs: configs dict that will be passed to each Node - Group - :param cluster_configs: configs dict that will be passed to the - Cluster - :param enable_anti_affinity: If set to true the vms will be scheduled - one per compute node. - :param enable_proxy: Use Master Node of a Cluster as a Proxy node and - do not assign floating ips to workers. 
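Editor's note: the replication override built by _setup_replication_config above follows a simple rule: HDFS replication is the worker count capped at 3 (the Hadoop default). The sketch below mirrors that logic with a trimmed copy of the REPLICATION_CONFIGS table.

REPLICATION_SAMPLE = {"vanilla": {"2.7.1": {"target": "HDFS",
                                            "config_name": "dfs.replication"}}}

def replication_config(plugin_name, hadoop_version, workers_count):
    """Build the applicable_target -> config_name -> value override."""
    conf = REPLICATION_SAMPLE[plugin_name][hadoop_version]
    return {conf["target"]: {conf["config_name"]: min(workers_count, 3)}}

assert replication_config("vanilla", "2.7.1", 2) == {"HDFS": {"dfs.replication": 2}}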
- :param wait_active: Wait until a Cluster gets int "Active" state - :param use_autoconfig: If True, instances of the node group will be - automatically configured during cluster - creation. If False, the configuration values - should be specify manually - :returns: created cluster - """ - - if enable_proxy: - proxies_count = int( - workers_count / CONF.openstack.sahara_workers_per_proxy) - else: - proxies_count = 0 - - if flavor_id: - # Note: the deprecated argument is used. Falling back to single - # flavor behavior. - master_flavor_id = flavor_id - worker_flavor_id = flavor_id - - node_groups = [ - { - "name": "master-ng", - "flavor_id": master_flavor_id, - "node_processes": sahara_consts.NODE_PROCESSES[plugin_name] - [hadoop_version]["master"], - "count": 1 - }, { - "name": "worker-ng", - "flavor_id": worker_flavor_id, - "node_processes": sahara_consts.NODE_PROCESSES[plugin_name] - [hadoop_version]["worker"], - "count": workers_count - proxies_count - } - ] - - if proxies_count: - node_groups.append({ - "name": "proxy-ng", - "flavor_id": worker_flavor_id, - "node_processes": sahara_consts.NODE_PROCESSES[plugin_name] - [hadoop_version]["worker"], - "count": proxies_count - }) - - if "manager" in (sahara_consts.NODE_PROCESSES[plugin_name] - [hadoop_version]): - # Adding manager group separately as it is supported only in - # specific configurations. - - node_groups.append({ - "name": "manager-ng", - "flavor_id": master_flavor_id, - "node_processes": sahara_consts.NODE_PROCESSES[plugin_name] - [hadoop_version]["manager"], - "count": 1 - }) - - node_groups = self._setup_floating_ip_pool(node_groups, - floating_ip_pool, - enable_proxy) - - neutron_net_id = self._get_neutron_net_id() - - node_groups = self._setup_volumes(node_groups, volumes_per_node, - volumes_size) - - node_groups = self._setup_security_groups(node_groups, - auto_security_group, - security_groups) - - node_groups = self._setup_node_configs(node_groups, node_configs) - - node_groups = self._setup_node_autoconfig(node_groups, use_autoconfig) - - replication_config = self._setup_replication_config(hadoop_version, - workers_count, - plugin_name) - - # The replication factor should be set for small clusters. However the - # cluster_configs parameter can override it - merged_cluster_configs = self._merge_configs(replication_config, - cluster_configs) - - aa_processes = None - if enable_anti_affinity: - aa_processes = (sahara_consts.ANTI_AFFINITY_PROCESSES[plugin_name] - [hadoop_version]) - - name = self.generate_random_name() - - cluster_object = self.clients("sahara").clusters.create( - name=name, - plugin_name=plugin_name, - hadoop_version=hadoop_version, - node_groups=node_groups, - default_image_id=image_id, - net_id=neutron_net_id, - cluster_configs=merged_cluster_configs, - anti_affinity=aa_processes, - use_autoconfig=use_autoconfig - ) - - if wait_active: - LOG.debug("Starting cluster `%s`" % name) - self._wait_active(cluster_object) - - return self.clients("sahara").clusters.get(cluster_object.id) - - def _update_cluster(self, cluster): - return self.clients("sahara").clusters.get(cluster.id) - - def _scale_cluster(self, cluster, delta): - """The scaling helper. - - This method finds the worker node group in a cluster, builds a - scale_object required by Sahara API and waits for the scaling to - complete. - - NOTE: This method is not meant to be called directly in scenarios. - There two specific scaling methods of up and down scaling which have - different atomic timers. 
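Editor's note: the node-group layout assembled by _launch_cluster can be hard to follow inside the diff, so here is a minimal hypothetical helper showing the arithmetic: one master group, the workers minus any proxies, plus an optional proxy group when the proxy feature is enabled. The workers_per_proxy default of 20 is an assumed example value; the real ratio comes from CONF.openstack.sahara_workers_per_proxy.

def build_node_groups(workers_count, enable_proxy=False, workers_per_proxy=20):
    proxies = int(workers_count / workers_per_proxy) if enable_proxy else 0
    groups = [
        {"name": "master-ng", "count": 1},
        {"name": "worker-ng", "count": workers_count - proxies},
    ]
    if proxies:
        groups.append({"name": "proxy-ng", "count": proxies})
    return groups

# 40 workers with one proxy per 20 workers -> 38 plain workers + 2 proxies.
assert build_node_groups(40, enable_proxy=True)[1:] == [
    {"name": "worker-ng", "count": 38}, {"name": "proxy-ng", "count": 2}]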
- """ - worker_node_group = [g for g in cluster.node_groups - if "worker" in g["name"]][0] - scale_object = { - "resize_node_groups": [ - { - "name": worker_node_group["name"], - "count": worker_node_group["count"] + delta - } - ] - } - self.clients("sahara").clusters.scale(cluster.id, scale_object) - - self._wait_active(cluster) - - @atomic.action_timer("sahara.scale_up") - def _scale_cluster_up(self, cluster, delta): - """Add a given number of worker nodes to the cluster. - - :param cluster: The cluster to be scaled - :param delta: The number of workers to be added. (A positive number is - expected here) - """ - self._scale_cluster(cluster, delta) - - @atomic.action_timer("sahara.scale_down") - def _scale_cluster_down(self, cluster, delta): - """Remove a given number of worker nodes from the cluster. - - :param cluster: The cluster to be scaled - :param delta: The number of workers to be removed. (A negative number - is expected here) - """ - self._scale_cluster(cluster, delta) - - @atomic.action_timer("sahara.delete_cluster") - def _delete_cluster(self, cluster): - """Delete cluster. - - :param cluster: cluster to delete - """ - - LOG.debug("Deleting cluster `%s`" % cluster.name) - self.clients("sahara").clusters.delete(cluster.id) - - utils.wait_for( - resource=cluster, - timeout=CONF.openstack.sahara_cluster_delete_timeout, - check_interval=CONF.openstack.sahara_cluster_check_interval, - is_ready=self._is_cluster_deleted) - - def _is_cluster_deleted(self, cluster): - LOG.debug("Checking cluster `%s` to be deleted. Status: `%s`" - % (cluster.name, cluster.status)) - try: - self.clients("sahara").clusters.get(cluster.id) - return False - except sahara_base.APIException: - return True - - def _create_output_ds(self): - """Create an output Data Source based on EDP context - - :returns: The created Data Source - """ - ds_type = self.context["sahara"]["output_conf"]["output_type"] - url_prefix = self.context["sahara"]["output_conf"]["output_url_prefix"] - - if ds_type == "swift": - raise exceptions.RallyException( - "Swift Data Sources are not implemented yet") - - url = url_prefix.rstrip("/") + "/%s" % self.generate_random_name() - - return self.clients("sahara").data_sources.create( - name=self.generate_random_name(), - description="", - data_source_type=ds_type, - url=url) - - def _run_job_execution(self, job_id, cluster_id, input_id, output_id, - configs, job_idx): - """Run a Job Execution and wait until it completes or fails. - - The Job Execution is accepted as successful when Oozie reports - "success" or "succeeded" status. The failure statuses are "failed" and - "killed". - - The timeout and the polling interval may be configured through - "sahara_job_execution_timeout" and "sahara_job_check_interval" - parameters under the "benchmark" section. - - :param job_id: The Job id that will be executed - :param cluster_id: The Cluster id which will execute the Job - :param input_id: The input Data Source id - :param output_id: The output Data Source id - :param configs: The config dict that will be passed as Job Execution's - parameters. 
- :param job_idx: The index of a job in a sequence - - """ - @atomic.action_timer("sahara.job_execution_%s" % job_idx) - def run(self): - job_execution = self.clients("sahara").job_executions.create( - job_id=job_id, - cluster_id=cluster_id, - input_id=input_id, - output_id=output_id, - configs=configs) - - utils.wait_for( - resource=job_execution.id, - is_ready=self._job_execution_is_finished, - timeout=CONF.openstack.sahara_job_execution_timeout, - check_interval=CONF.openstack.sahara_job_check_interval) - - run(self) - - def _job_execution_is_finished(self, je_id): - status = self.clients("sahara").job_executions.get(je_id).info[ - "status"].lower() - - LOG.debug("Checking for Job Execution %s to complete. Status: %s" - % (je_id, status)) - if status in ("success", "succeeded"): - return True - elif status in ("failed", "killed"): - raise exceptions.RallyException( - "Job execution %s has failed" % je_id) - return False - - def _merge_configs(self, *configs): - """Merge configs in special format. - - It supports merging of configs in the following format: - applicable_target -> config_name -> config_value - - """ - result = {} - for config_dict in configs: - if config_dict: - for a_target in config_dict: - if a_target not in result or not result[a_target]: - result[a_target] = {} - result[a_target].update(config_dict[a_target]) - - return result - - def _get_neutron_net_id(self): - """Get the Neutron Network id from context. - - If Nova Network is used as networking backend, None is returned. - - :returns: Network id for Neutron or None for Nova Networking. - """ - - if consts.Service.NEUTRON not in self.clients("services").values(): - return None - - # Taking net id from context. - net = self.context["tenant"]["networks"][0] - neutron_net_id = net["id"] - LOG.debug("Using neutron network %s." % neutron_net_id) - LOG.debug("Using neutron router %s." % net["router_id"]) - - return neutron_net_id - - -def init_sahara_context(context_instance): - context_instance.context["sahara"] = context_instance.context.get("sahara", - {}) - for user, tenant_id in rutils.iterate_per_tenants( - context_instance.context["users"]): - context_instance.context["tenants"][tenant_id]["sahara"] = ( - context_instance.context["tenants"][tenant_id].get("sahara", {})) diff --git a/rally/plugins/openstack/scenarios/senlin/__init__.py b/rally/plugins/openstack/scenarios/senlin/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/scenarios/senlin/clusters.py b/rally/plugins/openstack/scenarios/senlin/clusters.py deleted file mode 100644 index 8fb5daa641..0000000000 --- a/rally/plugins/openstack/scenarios/senlin/clusters.py +++ /dev/null @@ -1,49 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
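Editor's note: _merge_configs above merges dictionaries of the form applicable_target -> config_name -> value, with later dictionaries winning per config key. A standalone, slightly simplified sketch of that merge:

def merge_configs(*configs):
    result = {}
    for config_dict in configs:
        for target, values in (config_dict or {}).items():
            result.setdefault(target, {}).update(values)
    return result

merged = merge_configs({"HDFS": {"dfs.replication": 2}},
                       {"HDFS": {"dfs.blocksize": 134217728}, "YARN": {}})
assert merged == {"HDFS": {"dfs.replication": 2, "dfs.blocksize": 134217728},
                  "YARN": {}}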
- -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.senlin import utils -from rally.task import validation - - -"""Scenarios for Senlin clusters.""" - - -@validation.add("required_platform", platform="openstack", admin=True) -@validation.add("required_services", services=[consts.Service.SENLIN]) -@validation.add("required_contexts", contexts=("profiles")) -@scenario.configure(context={"admin_cleanup@openstack": ["senlin"]}, - name="SenlinClusters.create_and_delete_cluster", - platform="openstack") -class CreateAndDeleteCluster(utils.SenlinScenario): - - def run(self, desired_capacity=0, min_size=0, - max_size=-1, timeout=3600, metadata=None): - """Create a cluster and then delete it. - - Measure the "senlin cluster-create" and "senlin cluster-delete" - commands performance. - - :param desired_capacity: The capacity or initial number of nodes - owned by the cluster - :param min_size: The minimum number of nodes owned by the cluster - :param max_size: The maximum number of nodes owned by the cluster. - -1 means no limit - :param timeout: The timeout value in seconds for cluster creation - :param metadata: A set of key value pairs to associate with the cluster - """ - - profile_id = self.context["tenant"]["profile"] - cluster = self._create_cluster(profile_id, desired_capacity, - min_size, max_size, timeout, metadata) - self._delete_cluster(cluster) diff --git a/rally/plugins/openstack/scenarios/senlin/utils.py b/rally/plugins/openstack/scenarios/senlin/utils.py deleted file mode 100644 index fd905fbe69..0000000000 --- a/rally/plugins/openstack/scenarios/senlin/utils.py +++ /dev/null @@ -1,145 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import cfg -from rally import exceptions -from rally.plugins.openstack import scenario -from rally.task import atomic -from rally.task import utils - - -CONF = cfg.CONF - - -class SenlinScenario(scenario.OpenStackScenario): - """Base class for Senlin scenarios with basic atomic actions.""" - - @atomic.action_timer("senlin.list_clusters") - def _list_clusters(self, **queries): - """Return user cluster list. - - :param kwargs \*\*queries: Optional query parameters to be sent to - restrict the clusters to be returned. Available parameters include: - - * name: The name of a cluster. - * status: The current status of a cluster. - * sort: A list of sorting keys separated by commas. Each sorting - key can optionally be attached with a sorting direction - modifier which can be ``asc`` or ``desc``. - * limit: Requests a specified size of returned items from the - query. Returns a number of items up to the specified limit - value. - * marker: Specifies the ID of the last-seen item. Use the limit - parameter to make an initial limited request and use the ID of - the last-seen item from the response as the marker parameter - value in a subsequent limited request. - * global_project: A boolean value indicating whether clusters - from all projects will be returned. 
- - :returns: list of clusters according to query. - """ - return list(self.admin_clients("senlin").clusters(**queries)) - - @atomic.action_timer("senlin.create_cluster") - def _create_cluster(self, profile_id, desired_capacity=0, min_size=0, - max_size=-1, timeout=60, metadata=None): - """Create a new cluster from attributes. - - :param profile_id: ID of profile used to create cluster - :param desired_capacity: The capacity or initial number of nodes - owned by the cluster - :param min_size: The minimum number of nodes owned by the cluster - :param max_size: The maximum number of nodes owned by the cluster. - -1 means no limit - :param timeout: The timeout value in minutes for cluster creation - :param metadata: A set of key value pairs to associate with the cluster - - :returns: object of cluster created. - """ - attrs = { - "profile_id": profile_id, - "name": self.generate_random_name(), - "desired_capacity": desired_capacity, - "min_size": min_size, - "max_size": max_size, - "metadata": metadata, - "timeout": timeout - } - - cluster = self.admin_clients("senlin").create_cluster(**attrs) - cluster = utils.wait_for_status( - cluster, - ready_statuses=["ACTIVE"], - failure_statuses=["ERROR"], - update_resource=self._get_cluster, - timeout=CONF.openstack.senlin_action_timeout) - - return cluster - - def _get_cluster(self, cluster): - """Get cluster details. - - :param cluster: cluster to get - - :returns: object of cluster - """ - try: - return self.admin_clients("senlin").get_cluster(cluster.id) - except Exception as e: - if getattr(e, "code", getattr(e, "http_status", 400)) == 404: - raise exceptions.GetResourceNotFound(resource=cluster.id) - raise exceptions.GetResourceFailure(resource=cluster.id, err=e) - - @atomic.action_timer("senlin.delete_cluster") - def _delete_cluster(self, cluster): - """Delete given cluster. - - Returns after the cluster is successfully deleted. - - :param cluster: cluster object to delete - """ - self.admin_clients("senlin").delete_cluster(cluster) - utils.wait_for_status( - cluster, - ready_statuses=["DELETED"], - failure_statuses=["ERROR"], - check_deletion=True, - update_resource=self._get_cluster, - timeout=CONF.openstack.senlin_action_timeout) - - @atomic.action_timer("senlin.create_profile") - def _create_profile(self, spec, metadata=None): - """Create a new profile from attributes. - - :param spec: spec dictionary used to create profile - :param metadata: A set of key value pairs to associate with the - profile - - :returns: object of profile created - """ - attrs = {} - attrs["spec"] = spec - attrs["name"] = self.generate_random_name() - if metadata: - attrs["metadata"] = metadata - - return self.clients("senlin").create_profile(**attrs) - - @atomic.action_timer("senlin.delete_profile") - def _delete_profile(self, profile): - """Delete given profile. - - Returns after the profile is successfully deleted. - - :param profile: profile object to be deleted - """ - self.clients("senlin").delete_profile(profile) diff --git a/rally/plugins/openstack/scenarios/swift/__init__.py b/rally/plugins/openstack/scenarios/swift/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/scenarios/swift/objects.py b/rally/plugins/openstack/scenarios/swift/objects.py deleted file mode 100644 index c48d4cb6d1..0000000000 --- a/rally/plugins/openstack/scenarios/swift/objects.py +++ /dev/null @@ -1,154 +0,0 @@ -# Copyright 2015: Cisco Systems, Inc. -# All Rights Reserved. 
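Editor's note: the Sahara and Senlin helpers above repeatedly rely on the same poll-until-status pattern (ready statuses, failure statuses, an update callback, a timeout and a check interval). The real implementation lives in rally.task.utils; the version below is a hypothetical, simplified rendering of the pattern using plain dictionaries.

import time

def wait_for_status(resource, ready, failure, refresh,
                    timeout=600, check_interval=1.0):
    """Poll refresh(resource) until a ready or failure status is seen."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        resource = refresh(resource)
        status = resource["status"].lower()
        if status in ready:
            return resource
        if status in failure:
            raise RuntimeError("resource went to %s" % status)
        time.sleep(check_interval)
    raise TimeoutError("resource never reached %s" % list(ready))

# Example with an in-memory fake that becomes active on the second poll.
states = iter([{"status": "CREATING"}, {"status": "ACTIVE"}])
result = wait_for_status({"status": "INIT"}, ["active"], ["error"],
                         lambda _r: next(states), check_interval=0)
assert result["status"] == "ACTIVE"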
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import tempfile - -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.swift import utils -from rally.task import validation - - -"""Scenarios for Swift Objects.""" - - -@validation.add("required_services", services=[consts.Service.SWIFT]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure( - context={"cleanup@openstack": ["swift"]}, - name="SwiftObjects.create_container_and_object_then_list_objects", - platform="openstack") -class CreateContainerAndObjectThenListObjects(utils.SwiftScenario): - - def run(self, objects_per_container=1, object_size=1024, **kwargs): - """Create container and objects then list all objects. - - :param objects_per_container: int, number of objects to upload - :param object_size: int, temporary local object size - :param kwargs: dict, optional parameters to create container - """ - - with tempfile.TemporaryFile() as dummy_file: - # set dummy file to specified object size - dummy_file.truncate(object_size) - container_name = self._create_container(**kwargs) - for i in range(objects_per_container): - dummy_file.seek(0) - self._upload_object(container_name, dummy_file) - self._list_objects(container_name) - - -@validation.add("required_services", services=[consts.Service.SWIFT]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure( - context={"cleanup@openstack": ["swift"]}, - name="SwiftObjects.create_container_and_object_then_delete_all", - platform="openstack") -class CreateContainerAndObjectThenDeleteAll(utils.SwiftScenario): - - def run(self, objects_per_container=1, object_size=1024, **kwargs): - """Create container and objects then delete everything created. - - :param objects_per_container: int, number of objects to upload - :param object_size: int, temporary local object size - :param kwargs: dict, optional parameters to create container - """ - container_name = None - objects_list = [] - with tempfile.TemporaryFile() as dummy_file: - # set dummy file to specified object size - dummy_file.truncate(object_size) - container_name = self._create_container(**kwargs) - for i in range(objects_per_container): - dummy_file.seek(0) - object_name = self._upload_object(container_name, - dummy_file)[1] - objects_list.append(object_name) - - for object_name in objects_list: - self._delete_object(container_name, object_name) - self._delete_container(container_name) - - -@validation.add("required_services", services=[consts.Service.SWIFT]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure( - context={"cleanup@openstack": ["swift"]}, - name="SwiftObjects.create_container_and_object_then_download_object", - platform="openstack") -class CreateContainerAndObjectThenDownloadObject(utils.SwiftScenario): - - def run(self, objects_per_container=1, object_size=1024, **kwargs): - """Create container and objects then download all objects. 
- - :param objects_per_container: int, number of objects to upload - :param object_size: int, temporary local object size - :param kwargs: dict, optional parameters to create container - """ - container_name = None - objects_list = [] - with tempfile.TemporaryFile() as dummy_file: - # set dummy file to specified object size - dummy_file.truncate(object_size) - container_name = self._create_container(**kwargs) - for i in range(objects_per_container): - dummy_file.seek(0) - object_name = self._upload_object(container_name, - dummy_file)[1] - objects_list.append(object_name) - - for object_name in objects_list: - self._download_object(container_name, object_name) - - -@validation.add("required_services", services=[consts.Service.SWIFT]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure( - context={"swift_objects@openstack": {}}, - name="SwiftObjects.list_objects_in_containers", - platform="openstack") -class ListObjectsInContainers(utils.SwiftScenario): - - def run(self): - """List objects in all containers.""" - - containers = self._list_containers()[1] - - for container in containers: - self._list_objects(container["name"]) - - -@validation.add("required_services", services=[consts.Service.SWIFT]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure( - context={"swift_objects@openstack": {}}, - name="SwiftObjects.list_and_download_objects_in_containers", - platform="openstack") -class ListAndDownloadObjectsInContainers(utils.SwiftScenario): - - def run(self): - """List and download objects in all containers.""" - - containers = self._list_containers()[1] - - objects_dict = {} - for container in containers: - container_name = container["name"] - objects_dict[container_name] = self._list_objects( - container_name)[1] - - for container_name, objects in objects_dict.items(): - for obj in objects: - self._download_object(container_name, obj["name"]) diff --git a/rally/plugins/openstack/scenarios/swift/utils.py b/rally/plugins/openstack/scenarios/swift/utils.py deleted file mode 100644 index 536e4564b0..0000000000 --- a/rally/plugins/openstack/scenarios/swift/utils.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright 2015: Cisco Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.plugins.openstack import scenario -from rally.task import atomic - - -class SwiftScenario(scenario.OpenStackScenario): - """Base class for Swift scenarios with basic atomic actions.""" - - @atomic.action_timer("swift.list_containers") - def _list_containers(self, full_listing=True, **kwargs): - """Return list of containers. 
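Editor's note: all three create-container Swift scenarios above reuse the same temporary-file trick: one sparse file is truncated to the requested object size and rewound before every upload. The sketch below isolates that trick; the upload callable is a placeholder for the real put_object helper.

import tempfile

def upload_dummy_objects(upload, objects_per_container=2, object_size=1024):
    uploaded = []
    with tempfile.TemporaryFile() as dummy_file:
        dummy_file.truncate(object_size)     # grow the file to object_size bytes
        for i in range(objects_per_container):
            dummy_file.seek(0)               # rewind so each upload reads from byte 0
            uploaded.append(upload("object-%d" % i, dummy_file))
    return uploaded

# A no-op stand-in for the Swift upload just records the generated names.
names = upload_dummy_objects(lambda name, stream: name)
assert names == ["object-0", "object-1"]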
- - :param full_listing: bool, enable unlimit number of listing returned - :param kwargs: dict, other optional parameters to get_account - - :returns: tuple, (dict of response headers, a list of containers) - """ - return self.clients("swift").get_account(full_listing=full_listing, - **kwargs) - - @atomic.action_timer("swift.create_container") - def _create_container(self, public=False, **kwargs): - """Create a new container. - - :param public: bool, set container as public - :param kwargs: dict, other optional parameters to put_container - - :returns: container name - """ - if public: - kwargs.setdefault("headers", {}) - kwargs["headers"].setdefault("X-Container-Read", ".r:*,.rlistings") - - container_name = self.generate_random_name() - - self.clients("swift").put_container(container_name, **kwargs) - return container_name - - @atomic.action_timer("swift.delete_container") - def _delete_container(self, container_name, **kwargs): - """Delete a container with given name. - - :param container_name: str, name of the container to delete - :param kwargs: dict, other optional parameters to delete_container - """ - self.clients("swift").delete_container(container_name, **kwargs) - - @atomic.action_timer("swift.list_objects") - def _list_objects(self, container_name, full_listing=True, **kwargs): - """Return objects inside container. - - :param container_name: str, name of the container to make the list - objects operation against - :param full_listing: bool, enable unlimit number of listing returned - :param kwargs: dict, other optional parameters to get_container - - :returns: tuple, (dict of response headers, a list of objects) - """ - return self.clients("swift").get_container(container_name, - full_listing=full_listing, - **kwargs) - - @atomic.action_timer("swift.upload_object") - def _upload_object(self, container_name, content, **kwargs): - """Upload content to a given container. - - :param container_name: str, name of the container to upload object to - :param content: file stream, content to upload - :param kwargs: dict, other optional parameters to put_object - - :returns: tuple, (etag and object name) - """ - object_name = self.generate_random_name() - - return (self.clients("swift").put_object(container_name, object_name, - content, **kwargs), - object_name) - - @atomic.action_timer("swift.download_object") - def _download_object(self, container_name, object_name, **kwargs): - """Download object from container. - - :param container_name: str, name of the container to download object - from - :param object_name: str, name of the object to download - :param kwargs: dict, other optional parameters to get_object - - :returns: tuple, (dict of response headers, the object's contents) - """ - return self.clients("swift").get_object(container_name, object_name, - **kwargs) - - @atomic.action_timer("swift.delete_object") - def _delete_object(self, container_name, object_name, **kwargs): - """Delete object from container. 
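Editor's note: _create_container above marks a container world-readable by injecting the Swift read ACL ".r:*,.rlistings" into the put_container headers. A tiny sketch of just that header handling, detached from the client call:

def container_kwargs(public=False, **kwargs):
    """Return put_container kwargs, adding a public-read ACL when requested."""
    if public:
        kwargs.setdefault("headers", {})
        kwargs["headers"].setdefault("X-Container-Read", ".r:*,.rlistings")
    return kwargs

assert container_kwargs(public=True) == {
    "headers": {"X-Container-Read": ".r:*,.rlistings"}}
assert container_kwargs(public=False) == {}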
- - :param container_name: str, name of the container to delete object from - :param object_name: str, name of the object to delete - :param kwargs: dict, other optional parameters to delete_object - """ - self.clients("swift").delete_object(container_name, object_name, - **kwargs) diff --git a/rally/plugins/openstack/scenarios/vm/__init__.py b/rally/plugins/openstack/scenarios/vm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/scenarios/vm/utils.py b/rally/plugins/openstack/scenarios/vm/utils.py deleted file mode 100644 index 894f590ad7..0000000000 --- a/rally/plugins/openstack/scenarios/vm/utils.py +++ /dev/null @@ -1,234 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os.path -import subprocess -import sys - -import netaddr -import six - -from rally.common import cfg -from rally.common import logging -from rally.common import sshutils -from rally.plugins.openstack.scenarios.nova import utils as nova_utils -from rally.plugins.openstack.wrappers import network as network_wrapper -from rally.task import atomic -from rally.task import utils - -LOG = logging.getLogger(__name__) - - -CONF = cfg.CONF - - -class Host(object): - - ICMP_UP_STATUS = "ICMP UP" - ICMP_DOWN_STATUS = "ICMP DOWN" - - name = "ip" - - def __init__(self, ip): - self.ip = netaddr.IPAddress(ip) - self.status = self.ICMP_DOWN_STATUS - - @property - def id(self): - return self.ip.format() - - @classmethod - def update_status(cls, server): - """Check ip address is pingable and update status.""" - ping = "ping" if server.ip.version == 4 else "ping6" - if sys.platform.startswith("linux"): - cmd = [ping, "-c1", "-w1", server.ip.format()] - else: - cmd = [ping, "-c1", server.ip.format()] - - proc = subprocess.Popen(cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - proc.wait() - LOG.debug("Host %s is ICMP %s" - % (server.ip.format(), proc.returncode and "down" or "up")) - if proc.returncode == 0: - server.status = cls.ICMP_UP_STATUS - else: - server.status = cls.ICMP_DOWN_STATUS - return server - - def __eq__(self, other): - if not isinstance(other, Host): - raise TypeError("%s should be an instance of %s" % ( - other, Host.__class__.__name__)) - return self.ip == other.ip and self.status == other.status - - def __ne__(self, other): - return not self.__eq__(other) - - -class VMScenario(nova_utils.NovaScenario): - """Base class for VM scenarios with basic atomic actions. - - VM scenarios are scenarios executed inside some launched VM instance. - """ - - USER_RWX_OTHERS_RX_ACCESS_MODE = 0o755 - - RESOURCE_NAME_PREFIX = "rally_vm_" - - @atomic.action_timer("vm.run_command_over_ssh") - def _run_command_over_ssh(self, ssh, command): - """Run command inside an instance. - - This is a separate function so that only script execution is timed. - - :param ssh: A SSHClient instance. - :param command: Dictionary specifying command to execute. 
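Editor's note: the Host helper above decides ICMP reachability by running a single ping (ping6 for IPv6 addresses, with a one-second deadline on Linux) and inspecting the exit code. A hypothetical standalone version of that probe, assuming the same netaddr dependency and a ping binary on the PATH:

import subprocess
import sys

import netaddr  # same third-party dependency the deleted module uses

def icmp_up(address):
    """Return True if a single ICMP echo to the address succeeds."""
    ip = netaddr.IPAddress(address)
    ping = "ping" if ip.version == 4 else "ping6"
    cmd = [ping, "-c1", ip.format()]
    if sys.platform.startswith("linux"):
        cmd.insert(2, "-w1")  # one-second deadline on Linux
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    proc.wait()
    return proc.returncode == 0

# icmp_up("127.0.0.1") is normally True on a host with a loopback interface.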
- See `rally info find VMTasks.boot_runcommand_delete' parameter - `command' docstring for explanation. - - :returns: tuple (exit_status, stdout, stderr) - """ - cmd, stdin = [], None - - interpreter = command.get("interpreter") or [] - if interpreter: - if isinstance(interpreter, six.string_types): - interpreter = [interpreter] - elif type(interpreter) != list: - raise ValueError("command 'interpreter' value must be str " - "or list type") - cmd.extend(interpreter) - - remote_path = command.get("remote_path") or [] - if remote_path: - if isinstance(remote_path, six.string_types): - remote_path = [remote_path] - elif type(remote_path) != list: - raise ValueError("command 'remote_path' value must be str " - "or list type") - cmd.extend(remote_path) - if command.get("local_path"): - ssh.put_file(os.path.expanduser( - command["local_path"]), remote_path[-1], - mode=self.USER_RWX_OTHERS_RX_ACCESS_MODE) - - if command.get("script_file"): - stdin = open(os.path.expanduser(command["script_file"]), "rb") - - elif command.get("script_inline"): - stdin = six.moves.StringIO(command["script_inline"]) - - cmd.extend(command.get("command_args") or []) - - return ssh.execute(cmd, stdin=stdin) - - def _boot_server_with_fip(self, image, flavor, use_floating_ip=True, - floating_network=None, **kwargs): - """Boot server prepared for SSH actions.""" - kwargs["auto_assign_nic"] = True - server = self._boot_server(image, flavor, **kwargs) - - if not server.networks: - raise RuntimeError( - "Server `%s' is not connected to any network. " - "Use network context for auto-assigning networks " - "or provide `nics' argument with specific net-id." % - server.name) - - if use_floating_ip: - fip = self._attach_floating_ip(server, floating_network) - else: - internal_network = list(server.networks)[0] - fip = {"ip": server.addresses[internal_network][0]["addr"]} - - return server, {"ip": fip.get("ip"), - "id": fip.get("id"), - "is_floating": use_floating_ip} - - @atomic.action_timer("vm.attach_floating_ip") - def _attach_floating_ip(self, server, floating_network): - internal_network = list(server.networks)[0] - fixed_ip = server.addresses[internal_network][0]["addr"] - - with atomic.ActionTimer(self, "neutron.create_floating_ip"): - fip = network_wrapper.wrap(self.clients, self).create_floating_ip( - ext_network=floating_network, - tenant_id=server.tenant_id, fixed_ip=fixed_ip) - - self._associate_floating_ip(server, fip, fixed_address=fixed_ip) - - return fip - - @atomic.action_timer("vm.delete_floating_ip") - def _delete_floating_ip(self, server, fip): - with logging.ExceptionLogger( - LOG, "Unable to delete IP: %s" % fip["ip"]): - if self.check_ip_address(fip["ip"])(server): - self._dissociate_floating_ip(server, fip) - with atomic.ActionTimer(self, "neutron.delete_floating_ip"): - network_wrapper.wrap(self.clients, - self).delete_floating_ip( - fip["id"], wait=True) - - def _delete_server_with_fip(self, server, fip, force_delete=False): - if fip["is_floating"]: - self._delete_floating_ip(server, fip) - return self._delete_server(server, force=force_delete) - - @atomic.action_timer("vm.wait_for_ssh") - def _wait_for_ssh(self, ssh, timeout=120, interval=1): - ssh.wait(timeout, interval) - - @atomic.action_timer("vm.wait_for_ping") - def _wait_for_ping(self, server_ip): - server = Host(server_ip) - utils.wait_for_status( - server, - ready_statuses=[Host.ICMP_UP_STATUS], - update_resource=Host.update_status, - timeout=CONF.openstack.vm_ping_timeout, - check_interval=CONF.openstack.vm_ping_poll_interval - ) - - def 
_run_command(self, server_ip, port, username, password, command, - pkey=None, timeout=120, interval=1): - """Run command via SSH on server. - - Create SSH connection for server, wait for server to become available - (there is a delay between server being set to ACTIVE and sshd being - available). Then call run_command_over_ssh to actually execute the - command. - - :param server_ip: server ip address - :param port: ssh port for SSH connection - :param username: str. ssh username for server - :param password: Password for SSH authentication - :param command: Dictionary specifying command to execute. - See `rally info find VMTasks.boot_runcommand_delete' parameter - `command' docstring for explanation. - :param pkey: key for SSH authentication - :param timeout: wait for ssh timeout. Default is 120 seconds - :param interval: ssh retry interval. Default is 1 second - - :returns: tuple (exit_status, stdout, stderr) - """ - pkey = pkey if pkey else self.context["user"]["keypair"]["private"] - ssh = sshutils.SSH(username, server_ip, port=port, - pkey=pkey, password=password) - self._wait_for_ssh(ssh, timeout, interval) - return self._run_command_over_ssh(ssh, command) diff --git a/rally/plugins/openstack/scenarios/vm/vmtasks.py b/rally/plugins/openstack/scenarios/vm/vmtasks.py deleted file mode 100644 index 923d3e689e..0000000000 --- a/rally/plugins/openstack/scenarios/vm/vmtasks.py +++ /dev/null @@ -1,534 +0,0 @@ -# Copyright 2014: Rackspace UK -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json -import os -import pkgutil - -from rally.common import logging -from rally.common import sshutils -from rally.common import validation -from rally import consts -from rally import exceptions -from rally.plugins.common import validators -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.cinder import utils as cinder_utils -from rally.plugins.openstack.scenarios.vm import utils as vm_utils -from rally.plugins.openstack.services import heat -from rally.task import atomic -from rally.task import types - - -"""Scenarios that are to be run inside VM instances.""" - - -LOG = logging.getLogger(__name__) - - -# TODO(andreykurilin): replace by advanced jsonschema(lollipop?!) someday -@validation.configure(name="valid_command", platform="openstack") -class ValidCommandValidator(validators.FileExistsValidator): - - def __init__(self, param_name, required=True): - """Checks that parameter is a proper command-specifying dictionary. - - Ensure that the command dictionary is a proper command-specifying - dictionary described in 'vmtasks.VMTasks.boot_runcommand_delete' - docstring. 
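For orientation while reading the validator below, here is a hedged, standalone approximation (not part of the original tree) of the main rules `check_command_dict` enforces on a command dictionary; the real validator additionally checks file accessibility and interpreter/remote_path consistency:

.. code-block:: python

    # Simplified re-statement of the key checks in check_command_dict.
    ALLOWED_KEYS = {"script_file", "script_inline", "interpreter",
                    "remote_path", "local_path", "command_args"}

    def looks_valid(command):
        if not isinstance(command, dict):
            return False
        if command.get("interpreter"):
            # script_inline and script_file are mutually exclusive here
            if command.get("script_file") and "script_inline" in command:
                return False
        elif not command.get("remote_path"):
            # neither an interpreter nor a remote command to execute
            return False
        return not (set(command) - ALLOWED_KEYS)

    assert looks_valid({"script_inline": "uptime", "interpreter": "/bin/sh"})
    assert looks_valid({"remote_path": "/bin/false"})
    assert not looks_valid({"command_args": ["ls"]})  # nothing to execute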
- - :param param_name: Name of parameter to validate - :param required: Boolean indicating that the command dictionary is - required - """ - super(ValidCommandValidator, self).__init__(param_name=param_name) - - self.required = required - - def check_command_dict(self, command): - """Check command-specifying dict `command' - - :raises ValueError: on error - """ - - if not isinstance(command, dict): - self.fail("Command must be a dictionary") - - # NOTE(pboldin): Here we check for the values not for presence of the - # keys due to template-driven configuration generation that can leave - # keys defined but values empty. - if command.get("interpreter"): - script_file = command.get("script_file") - if script_file: - if "script_inline" in command: - self.fail( - "Exactly one of script_inline or script_file with " - "interpreter is expected: %r" % command) - # User tries to upload a shell? Make sure it is same as interpreter - interpreter = command.get("interpreter") - interpreter = (interpreter[-1] - if isinstance(interpreter, (tuple, list)) - else interpreter) - if (command.get("local_path") and - command.get("remote_path") != interpreter): - self.fail( - "When uploading an interpreter its path should be as well" - " specified as the `remote_path' string: %r" % command) - elif not command.get("remote_path"): - # No interpreter and no remote command to execute is given - self.fail( - "Supplied dict specifies no command to execute, either " - "interpreter or remote_path is required: %r" % command) - - unexpected_keys = set(command) - {"script_file", "script_inline", - "interpreter", "remote_path", - "local_path", "command_args"} - if unexpected_keys: - self.fail( - "Unexpected command parameters: %s" % ", ".join( - unexpected_keys)) - - def validate(self, context, config, plugin_cls, plugin_cfg): - command = config.get("args", {}).get(self.param_name) - if command is None and not self.required: - return - - try: - self.check_command_dict(command) - except ValueError as e: - return self.fail(str(e)) - - for key in "script_file", "local_path": - if command.get(key): - self._file_access_ok( - filename=command[key], mode=os.R_OK, - param_name=self.param_name, required=self.required) - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image", fail_on_404_image=False) -@validation.add("valid_command", param_name="command") -@validation.add("number", param_name="port", minval=1, maxval=65535, - nullable=True, integer_only=True) -@validation.add("external_network_exists", param_name="floating_network") -@validation.add("required_services", services=[consts.Service.NOVA, - consts.Service.CINDER]) -@validation.add("required_param_or_context", - param_name="image", ctx_name="image_command_customizer") -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["nova", "cinder"], - "keypair@openstack": {}, - "allow_ssh@openstack": None}, - name="VMTasks.boot_runcommand_delete", - platform="openstack") -class BootRuncommandDelete(vm_utils.VMScenario, cinder_utils.CinderBasic): - - def run(self, flavor, username, password=None, - image=None, - command=None, - volume_args=None, floating_network=None, port=22, - use_floating_ip=True, force_delete=False, wait_for_ping=True, - max_log_length=None, **kwargs): - """Boot a server, run script specified in command and delete server. - - :param image: glance image name to use for the vm. 
Optional - in case of specified "image_command_customizer" context - :param flavor: VM flavor name - :param username: ssh username on server, str - :param password: Password on SSH authentication - :param command: Command-specifying dictionary that either specifies - remote command path via `remote_path' (can be uploaded from a - local file specified by `local_path`), an inline script via - `script_inline' or a local script file path using `script_file'. - Both `script_file' and `local_path' are checked to be accessible - by the `file_exists' validator code. - - The `script_inline' and `script_file' both require an `interpreter' - value to specify the interpreter script should be run with. - - Note that any of `interpreter' and `remote_path' can be an array - prefixed with environment variables and suffixed with args for - the `interpreter' command. `remote_path's last component must be - a path to a command to execute (also upload destination if a - `local_path' is given). Uploading an interpreter is possible - but requires that `remote_path' and `interpreter' path do match. - - Examples: - - .. code-block:: python - - # Run a `local_script.pl' file sending it to a remote - # Perl interpreter - command = { - "script_file": "local_script.pl", - "interpreter": "/usr/bin/perl" - } - - # Run an inline script sending it to a remote interpreter - command = { - "script_inline": "echo 'Hello, World!'", - "interpreter": "/bin/sh" - } - - # Run a remote command - command = { - "remote_path": "/bin/false" - } - - # Copy a local command and run it - command = { - "remote_path": "/usr/local/bin/fio", - "local_path": "/home/foobar/myfiodir/bin/fio" - } - - # Copy a local command and run it with environment variable - command = { - "remote_path": ["HOME=/root", "/usr/local/bin/fio"], - "local_path": "/home/foobar/myfiodir/bin/fio" - } - - # Run an inline script sending it to a remote interpreter - command = { - "script_inline": "echo \"Hello, ${NAME:-World}\"", - "interpreter": ["NAME=Earth", "/bin/sh"] - } - - # Run an inline script sending it to an uploaded remote - # interpreter - command = { - "script_inline": "echo \"Hello, ${NAME:-World}\"", - "interpreter": ["NAME=Earth", "/tmp/sh"], - "remote_path": "/tmp/sh", - "local_path": "/home/user/work/cve/sh-1.0/bin/sh" - } - - - :param volume_args: volume args for booting server from volume - :param floating_network: external network name, for floating ip - :param port: ssh port for SSH connection - :param use_floating_ip: bool, floating or fixed IP for SSH connection - :param force_delete: whether to use force_delete for servers - :param wait_for_ping: whether to check connectivity on server creation - :param max_log_length: The number of tail nova console-log lines user - would like to retrieve - :param kwargs: extra arguments for booting the server - """ - if volume_args: - volume = self.cinder.create_volume(volume_args["size"], - imageRef=None) - kwargs["block_device_mapping"] = {"vdrally": "%s:::1" % volume.id} - - if not image: - image = self.context["tenant"]["custom_image"]["id"] - - server, fip = self._boot_server_with_fip( - image, flavor, use_floating_ip=use_floating_ip, - floating_network=floating_network, - key_name=self.context["user"]["keypair"]["name"], - **kwargs) - try: - if wait_for_ping: - self._wait_for_ping(fip["ip"]) - - code, out, err = self._run_command( - fip["ip"], port, username, password, command=command) - text_area_output = ["StdErr: %s" % (err or "(none)"), - "StdOut:"] - if code: - raise exceptions.ScriptError( - "Error 
running command %(command)s. " - "Error %(code)s: %(error)s" % { - "command": command, "code": code, "error": err}) - # Let's try to load output data - try: - data = json.loads(out) - # 'echo 42' produces very json-compatible result - # - check it here - if not isinstance(data, dict): - raise ValueError - except ValueError: - # It's not a JSON, probably it's 'script_inline' result - data = [] - except (exceptions.TimeoutException, - exceptions.SSHTimeout): - console_logs = self._get_server_console_output(server, - max_log_length) - LOG.debug("VM console logs:\n%s" % console_logs) - raise - - finally: - self._delete_server_with_fip(server, fip, - force_delete=force_delete) - - if isinstance(data, dict) and set(data) == {"additive", "complete"}: - for chart_type, charts in data.items(): - for chart in charts: - self.add_output(**{chart_type: chart}) - else: - # it's a dict with several unknown lines - text_area_output.extend(out.split("\n")) - self.add_output(complete={"title": "Script Output", - "chart_plugin": "TextArea", - "data": text_area_output}) - - -@scenario.configure(context={"cleanup@openstack": ["nova", "heat"], - "keypair@openstack": {}, "network@openstack": {}}, - name="VMTasks.runcommand_heat") -class RuncommandHeat(vm_utils.VMScenario): - - def run(self, workload, template, files, parameters): - """Run workload on stack deployed by heat. - - Workload can be either file or resource: - - .. code-block:: json - - {"file": "/path/to/file.sh"} - {"resource": ["package.module", "workload.py"]} - - - Also it should contain "username" key. - - Given file will be uploaded to `gate_node` and started. This script - should print `key` `value` pairs separated by colon. These pairs will - be presented in results. - - Gate node should be accessible via ssh with keypair `key_name`, so - heat template should accept parameter `key_name`. - - :param workload: workload to run - :param template: path to heat template file - :param files: additional template files - :param parameters: parameters for heat template - """ - keypair = self.context["user"]["keypair"] - parameters["key_name"] = keypair["name"] - network = self.context["tenant"]["networks"][0] - parameters["router_id"] = network["router_id"] - self.stack = heat.main.Stack(self, self.task, - template, files=files, - parameters=parameters) - self.stack.create() - for output in self.stack.stack.outputs: - if output["output_key"] == "gate_node": - ip = output["output_value"] - break - ssh = sshutils.SSH(workload["username"], ip, pkey=keypair["private"]) - ssh.wait() - script = workload.get("resource") - if script: - script = pkgutil.get_data(*script) - else: - script = open(workload["file"]).read() - ssh.execute("cat > /tmp/.rally-workload", stdin=script) - ssh.execute("chmod +x /tmp/.rally-workload") - with atomic.ActionTimer(self, "runcommand_heat.workload"): - status, out, err = ssh.execute( - "/tmp/.rally-workload", - stdin=json.dumps(self.stack.stack.outputs)) - rows = [] - for line in out.splitlines(): - row = line.split(":") - if len(row) != 2: - raise exceptions.ScriptError("Invalid data '%s'" % line) - rows.append(row) - if not rows: - raise exceptions.ScriptError("No data returned. 
Original error " - "message is %s" % err) - self.add_output( - complete={"title": "Workload summary", - "description": "Data generated by workload", - "chart_plugin": "Table", - "data": { - "cols": ["key", "value"], - "rows": rows}} - ) - -BASH_DD_LOAD_TEST = """ -#!/bin/sh -# Load server and output JSON results ready to be processed -# by Rally scenario - -for ex in awk top grep free tr df dc dd gzip -do - if ! type ${ex} >/dev/null - then - echo "Executable is required by script but not available\ - on a server: ${ex}" >&2 - return 1 - fi -done - -get_used_cpu_percent() { - echo 100\ - $(top -b -n 1 | grep -i CPU | head -n 1 | awk '{print $8}' | tr -d %)\ - - p | dc -} - -get_used_ram_percent() { - local total=$(free | grep Mem: | awk '{print $2}') - local used=$(free | grep -- -/+\ buffers | awk '{print $3}') - echo ${used} 100 \* ${total} / p | dc -} - -get_used_disk_percent() { - df -P / | grep -v Filesystem | awk '{print $5}' | tr -d % -} - -get_seconds() { - (time -p ${1}) 2>&1 | awk '/real/{print $2}' -} - -complete_load() { - local script_file=${LOAD_SCRIPT_FILE:-/tmp/load.sh} - local stop_file=${LOAD_STOP_FILE:-/tmp/load.stop} - local processes_num=${LOAD_PROCESSES_COUNT:-20} - local size=${LOAD_SIZE_MB:-5} - - cat << EOF > ${script_file} -until test -e ${stop_file} -do dd if=/dev/urandom bs=1M count=${size} 2>/dev/null | gzip >/dev/null ; done -EOF - - local sep - local cpu - local ram - local dis - rm -f ${stop_file} - for i in $(seq ${processes_num}) - do - i=$((i-1)) - sh ${script_file} & - cpu="${cpu}${sep}[${i}, $(get_used_cpu_percent)]" - ram="${ram}${sep}[${i}, $(get_used_ram_percent)]" - dis="${dis}${sep}[${i}, $(get_used_disk_percent)]" - sep=", " - done - > ${stop_file} - cat << EOF - { - "title": "Generate load by spawning processes", - "description": "Each process runs gzip for ${size}M urandom data\ - in a loop", - "chart_plugin": "Lines", - "axis_label": "Number of processes", - "label": "Usage, %", - "data": [ - ["CPU", [${cpu}]], - ["Memory", [${ram}]], - ["Disk", [${dis}]]] - } -EOF -} - -additive_dd() { - local c=${1:-50} # Megabytes - local file=/tmp/dd_test.img - local write=$(get_seconds "dd if=/dev/zero of=${file} bs=1M count=${c}") - local read=$(get_seconds "dd if=${file} of=/dev/null bs=1M count=${c}") - local gzip=$(get_seconds "gzip ${file}") - rm ${file}.gz - cat << EOF - { - "title": "Write, read and gzip file", - "description": "Using file '${file}', size ${c}Mb.", - "chart_plugin": "StackedArea", - "data": [ - ["write_${c}M", ${write}], - ["read_${c}M", ${read}], - ["gzip_${c}M", ${gzip}]] - }, - { - "title": "Statistics for write/read/gzip", - "chart_plugin": "StatsTable", - "data": [ - ["write_${c}M", ${write}], - ["read_${c}M", ${read}], - ["gzip_${c}M", ${gzip}]] - } - -EOF -} - -cat << EOF -{ - "additive": [$(additive_dd)], - "complete": [$(complete_load)] -} -EOF -""" - - -@types.convert(image={"type": "glance_image"}, - flavor={"type": "nova_flavor"}) -@validation.add("image_valid_on_flavor", flavor_param="flavor", - image_param="image") -@validation.add("number", param_name="port", minval=1, maxval=65535, - nullable=True, integer_only=True) -@validation.add("external_network_exists", param_name="floating_network") -@validation.add("required_services", services=[consts.Service.NOVA, - consts.Service.CINDER]) -@validation.add("required_platform", platform="openstack", users=True) -@scenario.configure(context={"cleanup@openstack": ["nova", "cinder"], - "keypair@openstack": {}, - "allow_ssh@openstack": None}, - name="VMTasks.dd_load_test", - 
platform="openstack") -class DDLoadTest(BootRuncommandDelete): - @logging.log_deprecated_args( - "Use 'interpreter' to specify the interpreter to execute script from.", - "0.10.0", ["command"], once=True) - def run(self, flavor, username, password=None, - image=None, command=None, interpreter="/bin/sh", - volume_args=None, floating_network=None, port=22, - use_floating_ip=True, force_delete=False, wait_for_ping=True, - max_log_length=None, **kwargs): - """Boot a server from a custom image and performs dd load test. - - .. note:: dd load test is prepared script by Rally team. It checks - writing and reading metrics from the VM. - - :param image: glance image name to use for the vm. Optional - in case of specified "image_command_customizer" context - :param flavor: VM flavor name - :param username: ssh username on server, str - :param password: Password on SSH authentication - :param interpreter: the interpreter to execute script with dd load test - (defaults to /bin/sh) - :param command: DEPRECATED. use interpreter instead. - :param volume_args: volume args for booting server from volume - :param floating_network: external network name, for floating ip - :param port: ssh port for SSH connection - :param use_floating_ip: bool, floating or fixed IP for SSH connection - :param force_delete: whether to use force_delete for servers - :param wait_for_ping: whether to check connectivity on server creation - :param max_log_length: The number of tail nova console-log lines user - would like to retrieve - :param kwargs: extra arguments for booting the server - """ - cmd = {"interpreter": interpreter, - "script_inline": BASH_DD_LOAD_TEST} - if command and "interpreter" in command: - cmd["interpreter"] = command["interpreter"] - return super(DDLoadTest, self).run( - flavor=flavor, username=username, password=password, - image=image, command=cmd, - volume_args=volume_args, floating_network=floating_network, - port=port, use_floating_ip=use_floating_ip, - force_delete=force_delete, - wait_for_ping=wait_for_ping, max_log_length=max_log_length, - **kwargs) diff --git a/rally/plugins/openstack/scenarios/watcher/__init__.py b/rally/plugins/openstack/scenarios/watcher/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/scenarios/watcher/basic.py b/rally/plugins/openstack/scenarios/watcher/basic.py deleted file mode 100644 index 208199643d..0000000000 --- a/rally/plugins/openstack/scenarios/watcher/basic.py +++ /dev/null @@ -1,98 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from rally.common import logging -from rally import consts -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.watcher import utils -from rally.task import types -from rally.task import validation - - -"""Scenarios for Watcher servers.""" - - -@types.convert(strategy={"type": "watcher_strategy"}, - goal={"type": "watcher_goal"}) -@validation.add("required_services", services=[consts.Service.WATCHER]) -@validation.add("required_platform", platform="openstack", admin=True) -@scenario.configure(context={"admin_cleanup@openstack": ["watcher"]}, - name="Watcher.create_audit_template_and_delete", - platform="openstack") -class CreateAuditTemplateAndDelete(utils.WatcherScenario): - - @logging.log_deprecated_args("Extra field has been removed " - "since it isn't used.", "0.8.0", ["extra"], - once=True) - def run(self, goal, strategy): - """Create audit template and delete it. - - :param goal: The goal audit template is based on - :param strategy: The strategy used to provide resource optimization - algorithm - """ - - audit_template = self._create_audit_template(goal, strategy) - self._delete_audit_template(audit_template.uuid) - - -@validation.add("required_services", services=[consts.Service.WATCHER]) -@scenario.configure(name="Watcher.list_audit_templates", platform="openstack") -class ListAuditTemplates(utils.WatcherScenario): - - def run(self, name=None, goal=None, strategy=None, - limit=None, sort_key=None, sort_dir=None, - detail=False): - """List existing audit templates. - - Audit templates are being created by Audit Template Context. - - :param name: Name of the audit template - :param goal: Name of the goal - :param strategy: Name of the strategy - :param limit: The maximum number of results to return per - request, if: - - 1) limit > 0, the maximum number of audit templates to return. - 2) limit == 0, return the entire list of audit_templates. - 3) limit param is NOT specified (None), the number of items - returned respect the maximum imposed by the Watcher API - (see Watcher's api.max_limit option). - :param sort_key: Optional, field used for sorting. - :param sort_dir: Optional, direction of sorting, either 'asc' (the - default) or 'desc'. - :param detail: Optional, boolean whether to return detailed information - about audit_templates. - """ - - self._list_audit_templates(name=name, goal=goal, strategy=strategy, - limit=limit, sort_key=sort_key, - sort_dir=sort_dir, detail=detail) - - -@validation.add("required_services", services=[consts.Service.WATCHER]) -@validation.add("required_contexts", contexts="audit_templates") -@scenario.configure(context={"admin_cleanup@openstack": ["watcher"]}, - name="Watcher.create_audit_and_delete", - platform="openstack") -class CreateAuditAndDelete(utils.WatcherScenario): - - def run(self): - """Create and delete audit. - - Create Audit, wait until whether Audit is in SUCCEEDED state or in - FAILED and delete audit. - """ - - audit_template_uuid = self.context["audit_templates"][0] - audit = self._create_audit(audit_template_uuid) - self._delete_audit(audit) diff --git a/rally/plugins/openstack/scenarios/watcher/utils.py b/rally/plugins/openstack/scenarios/watcher/utils.py deleted file mode 100644 index ce29d3e7e8..0000000000 --- a/rally/plugins/openstack/scenarios/watcher/utils.py +++ /dev/null @@ -1,73 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import cfg -from rally.plugins.openstack import scenario -from rally.task import atomic -from rally.task import utils - - -CONF = cfg.CONF - - -class WatcherScenario(scenario.OpenStackScenario): - """Base class for Watcher scenarios with basic atomic actions.""" - - @atomic.action_timer("watcher.create_audit_template") - def _create_audit_template(self, goal_id, strategy_id): - """Create Audit Template in DB - - :param goal_id: UUID Goal - :param strategy_id: UUID Strategy - :return: Audit Template object - """ - return self.admin_clients("watcher").audit_template.create( - goal=goal_id, - strategy=strategy_id, - name=self.generate_random_name()) - - @atomic.action_timer("watcher.delete_audit_template") - def _delete_audit_template(self, audit_template): - """Delete Audit Template from DB - - :param audit_template: Audit Template object - """ - self.admin_clients("watcher").audit_template.delete(audit_template) - - @atomic.action_timer("watcher.list_audit_templates") - def _list_audit_templates(self, name=None, goal=None, strategy=None, - limit=None, sort_key=None, sort_dir=None, - detail=False): - return self.admin_clients("watcher").audit_template.list( - name=name, goal=goal, strategy=strategy, limit=limit, - sort_key=sort_key, sort_dir=sort_dir, detail=detail) - - @atomic.action_timer("watcher.create_audit") - def _create_audit(self, audit_template_uuid): - audit = self.admin_clients("watcher").audit.create( - audit_template_uuid=audit_template_uuid, - audit_type="ONESHOT") - utils.wait_for_status( - audit, - ready_statuses=["SUCCEEDED"], - failure_statuses=["FAILED"], - status_attr="state", - update_resource=utils.get_from_manager(), - timeout=CONF.openstack.watcher_audit_launch_timeout, - check_interval=CONF.openstack.watcher_audit_launch_poll_interval, - id_attr="uuid" - ) - return audit - - @atomic.action_timer("watcher.delete_audit") - def _delete_audit(self, audit): - self.admin_clients("watcher").audit.delete(audit.uuid) diff --git a/rally/plugins/openstack/scenarios/zaqar/__init__.py b/rally/plugins/openstack/scenarios/zaqar/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/scenarios/zaqar/basic.py b/rally/plugins/openstack/scenarios/zaqar/basic.py deleted file mode 100644 index 4e6ae291ca..0000000000 --- a/rally/plugins/openstack/scenarios/zaqar/basic.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright (c) 2014 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import random - -from rally.common import logging -from rally.plugins.openstack import scenario -from rally.plugins.openstack.scenarios.zaqar import utils as zutils - - -"""Scenarios for Zaqar.""" - - -@scenario.configure(context={"cleanup@openstack": ["zaqar"]}, - name="ZaqarBasic.create_queue", platform="openstack") -class CreateQueue(zutils.ZaqarScenario): - - @logging.log_deprecated_args( - "The 'name_length' argument to create_queue is ignored", - "0.1.2", ["name_length"], once=True) - def run(self, name_length=None, **kwargs): - """Create a Zaqar queue with a random name. - - :param kwargs: other optional parameters to create queues like - "metadata" - """ - self._queue_create(**kwargs) - - -@scenario.configure(context={"cleanup@openstack": ["zaqar"]}, - name="ZaqarBasic.producer_consumer", platform="openstack") -class ProducerConsumer(zutils.ZaqarScenario): - - @logging.log_deprecated_args( - "The 'name_length' argument to producer_consumer is ignored", - "0.1.2", ["name_length"], once=True) - def run(self, name_length=None, - min_msg_count=50, max_msg_count=200, **kwargs): - """Serial message producer/consumer. - - Creates a Zaqar queue with random name, sends a set of messages - and then retrieves an iterator containing those. - - :param min_msg_count: min number of messages to be posted - :param max_msg_count: max number of messages to be posted - :param kwargs: other optional parameters to create queues like - "metadata" - """ - - queue = self._queue_create(**kwargs) - msg_count = random.randint(min_msg_count, max_msg_count) - messages = [{"body": {"id": idx}, "ttl": 360} for idx - in range(msg_count)] - self._messages_post(queue, messages, min_msg_count, max_msg_count) - self._messages_list(queue) - self._queue_delete(queue) diff --git a/rally/plugins/openstack/scenarios/zaqar/utils.py b/rally/plugins/openstack/scenarios/zaqar/utils.py deleted file mode 100644 index eb68023906..0000000000 --- a/rally/plugins/openstack/scenarios/zaqar/utils.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) 2014 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.plugins.openstack import scenario -from rally.task import atomic - - -class ZaqarScenario(scenario.OpenStackScenario): - """Base class for Zaqar scenarios with basic atomic actions.""" - - @atomic.action_timer("zaqar.create_queue") - def _queue_create(self, **kwargs): - """Create a Zaqar queue with random name. - - :param kwargs: other optional parameters to create queues like - "metadata" - :returns: Zaqar queue instance - """ - name = self.generate_random_name() - return self.clients("zaqar").queue(name, **kwargs) - - @atomic.action_timer("zaqar.delete_queue") - def _queue_delete(self, queue): - """Removes a Zaqar queue. - - :param queue: queue to remove - """ - - queue.delete() - - def _messages_post(self, queue, messages, min_msg_count, max_msg_count): - """Post a list of messages to a given Zaqar queue. 
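A tiny runnable excerpt of how the ProducerConsumer scenario removed above assembles its message payloads; 50 and 200 are that scenario's default bounds:

.. code-block:: python

    import random

    # Same payload construction as ZaqarBasic.producer_consumer performs
    # before posting the batch to the queue.
    min_msg_count, max_msg_count = 50, 200
    msg_count = random.randint(min_msg_count, max_msg_count)
    messages = [{"body": {"id": idx}, "ttl": 360} for idx in range(msg_count)]
    print(len(messages), messages[0])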
- - :param queue: post the messages to queue - :param messages: messages to post - :param min_msg_count: minimum number of messages - :param max_msg_count: maximum number of messages - """ - with atomic.ActionTimer(self, "zaqar.post_between_%s_and_%s_messages" % - (min_msg_count, max_msg_count)): - queue.post(messages) - - @atomic.action_timer("zaqar.list_messages") - def _messages_list(self, queue): - """Gets messages from a given Zaqar queue. - - :param queue: get messages from queue - :returns: messages iterator - """ - - return queue.messages() diff --git a/rally/plugins/openstack/service.py b/rally/plugins/openstack/service.py deleted file mode 100644 index 634e77e169..0000000000 --- a/rally/plugins/openstack/service.py +++ /dev/null @@ -1,20 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.task import service as base_service - - -service = base_service.service -compat_layer = base_service.compat_layer -Service = base_service.Service -should_be_overridden = base_service.should_be_overridden diff --git a/rally/plugins/openstack/services/__init__.py b/rally/plugins/openstack/services/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/services/gnocchi/__init__.py b/rally/plugins/openstack/services/gnocchi/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/services/gnocchi/metric.py b/rally/plugins/openstack/services/gnocchi/metric.py deleted file mode 100644 index 4621e80333..0000000000 --- a/rally/plugins/openstack/services/gnocchi/metric.py +++ /dev/null @@ -1,185 +0,0 @@ -# Copyright 2017 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.task import atomic -from rally.task import service - - -class GnocchiService(service.Service): - - @atomic.action_timer("gnocchi.create_archive_policy") - def create_archive_policy(self, name, definition=None, - aggregation_methods=None): - """Create an archive policy. - - :param name: Archive policy name - :param definition: Archive policy definition - :param aggregation_methods: Aggregation method of the archive policy - """ - archive_policy = {"name": name} - archive_policy["definition"] = definition - archive_policy["aggregation_methods"] = aggregation_methods - return self._clients.gnocchi().archive_policy.create( - archive_policy) - - @atomic.action_timer("gnocchi.delete_archive_policy") - def delete_archive_policy(self, name): - """Delete an archive policy. 
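The GnocchiService helpers above pass their arguments straight through to gnocchiclient; a hedged sketch of the archive-policy payload that create_archive_policy ends up building (the name, definition and aggregation methods shown are assumed example values, not from the original code):

.. code-block:: python

    # Payload shape assembled by create_archive_policy(); values are examples.
    archive_policy = {
        "name": "rally_ap_example",
        "definition": [{"granularity": "0:00:05", "points": 120}],
        "aggregation_methods": ["mean", "max"],
    }
    print(archive_policy)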
- - :param name: Archive policy name - """ - return self._clients.gnocchi().archive_policy.delete(name) - - @atomic.action_timer("gnocchi.list_archive_policy") - def list_archive_policy(self): - """List archive policies.""" - return self._clients.gnocchi().archive_policy.list() - - @atomic.action_timer("gnocchi.create_archive_policy_rule") - def create_archive_policy_rule(self, name, metric_pattern=None, - archive_policy_name=None): - """Create an archive policy rule. - - :param name: Archive policy rule name - :param metric_pattern: Wildcard of metric name to match - :param archive_policy_name: Archive policy name - """ - archive_policy_rule = {"name": name} - archive_policy_rule["metric_pattern"] = metric_pattern - archive_policy_rule["archive_policy_name"] = archive_policy_name - return self._clients.gnocchi().archive_policy_rule.create( - archive_policy_rule) - - @atomic.action_timer("gnocchi.delete_archive_policy_rule") - def delete_archive_policy_rule(self, name): - """Delete an archive policy rule. - - :param name: Archive policy rule name - """ - return self._clients.gnocchi().archive_policy_rule.delete(name) - - @atomic.action_timer("gnocchi.list_archive_policy_rule") - def list_archive_policy_rule(self): - """List archive policy rules.""" - return self._clients.gnocchi().archive_policy_rule.list() - - @atomic.action_timer("gnocchi.list_capabilities") - def list_capabilities(self): - """List capabilities.""" - return self._clients.gnocchi().capabilities.list() - - @atomic.action_timer("gnocchi.get_measures_aggregation") - def get_measures_aggregation(self, metrics, aggregation=None, - refresh=None): - """Get measurements of aggregated metrics. - - :param metrics: Metric IDs or name - :param aggregation: Granularity aggregation function to retrieve - :param refresh: Force aggregation of all known measures - """ - return self._clients.gnocchi().metric.aggregation( - metrics=metrics, aggregation=aggregation, refresh=refresh) - - @atomic.action_timer("gnocchi.get_measures") - def get_measures(self, metric, aggregation=None, refresh=None): - """Get measurements of a metric. - - :param metric: Metric ID or name - :param aggregation: Aggregation to retrieve - :param refresh: Force aggregation of all known measures - """ - return self._clients.gnocchi().metric.get_measures( - metric=metric, aggregation=aggregation, refresh=refresh) - - @atomic.action_timer("gnocchi.create_metric") - def create_metric(self, name, archive_policy_name=None, resource_id=None, - unit=None): - """Create a metric. - - :param name: Metric name - :param archive_policy_name: Archive policy name - :param resource_id: The resource ID to attach the metric to - :param unit: The unit of the metric - """ - return self._clients.gnocchi().metric.create( - name=name, archive_policy_name=archive_policy_name, - resource_id=resource_id, unit=unit) - - @atomic.action_timer("gnocchi.delete_metric") - def delete_metric(self, metric_id): - """Delete a metric. - - :param metric_id: metric ID - """ - return self._clients.gnocchi().metric.delete(metric_id) - - @atomic.action_timer("gnocchi.list_metric") - def list_metric(self): - """List metrics.""" - return self._clients.gnocchi().metric.list() - - @atomic.action_timer("gnocchi.create_resource") - def create_resource(self, resource_type="generic"): - """Create a resource. 
- - :param resource_type: Type of the resource - """ - resource = {"id": self.generate_random_name()} - return self._clients.gnocchi().resource.create( - resource_type, resource) - - @atomic.action_timer("gnocchi.delete_resource") - def delete_resource(self, resource_id): - """Delete a resource. - - :param resource_id: ID of the resource - """ - return self._clients.gnocchi().resource.delete(resource_id) - - @atomic.action_timer("gnocchi.list_resource") - def list_resource(self, resource_type="generic"): - """List resources.""" - return self._clients.gnocchi().resource.list( - resource_type=resource_type) - - @atomic.action_timer("gnocchi.create_resource_type") - def create_resource_type(self, name): - """Create a resource type. - - :param name: Name of the resource type - """ - resource_type = {"name": name or self.generate_random_name()} - return self._clients.gnocchi().resource_type.create( - resource_type) - - @atomic.action_timer("gnocchi.delete_resource_type") - def delete_resource_type(self, resource_type): - """Delete a resource type. - - :param resource_type: Resource type dict - """ - return self._clients.gnocchi().resource_type.delete(resource_type) - - @atomic.action_timer("gnocchi.list_resource_type") - def list_resource_type(self): - """List resource types.""" - return self._clients.gnocchi().resource_type.list() - - @atomic.action_timer("gnocchi.get_status") - def get_status(self, detailed=False): - """Get the status of measurements processing. - - :param detailed: Get detailed status. - """ - return self._clients.gnocchi().status.get(detailed) diff --git a/rally/plugins/openstack/services/heat/__init__.py b/rally/plugins/openstack/services/heat/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/services/heat/main.py b/rally/plugins/openstack/services/heat/main.py deleted file mode 100644 index af2c429be7..0000000000 --- a/rally/plugins/openstack/services/heat/main.py +++ /dev/null @@ -1,78 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import cfg -from rally.common import utils as common_utils -from rally.task import atomic -from rally.task import utils - -CONF = cfg.CONF - - -class Stack(common_utils.RandomNameGeneratorMixin): - """Represent heat stack. - - Usage: - >>> stack = Stack(scenario, task, "template.yaml", parameters={"nodes": 3}) - >>> do_testing(stack) - >>> stack.update(nodes=4) - >>> do_testing(stack) - """ - - def __init__(self, scenario, task, template, files, parameters=None): - """Init heat wrapper. 
- - :param Scenario scenario: scenario instance - :param Task task: task instance - :param str template: template file path - :param dict files: dict with file name and path - :param dict parameters: parameters for template - - """ - self.scenario = scenario - self.task = task - self.template = open(template).read() - self.files = {} - self.parameters = parameters - for name, path in files.items(): - self.files[name] = open(path).read() - - def _wait(self, ready_statuses, failure_statuses): - self.stack = utils.wait_for_status( - self.stack, - check_interval=CONF.openstack.heat_stack_create_poll_interval, - timeout=CONF.openstack.heat_stack_create_timeout, - ready_statuses=ready_statuses, - failure_statuses=failure_statuses, - update_resource=utils.get_from_manager(), - ) - - def create(self): - with atomic.ActionTimer(self.scenario, "heat.create"): - self.stack = self.scenario.clients("heat").stacks.create( - stack_name=self.scenario.generate_random_name(), - template=self.template, - files=self.files, - parameters=self.parameters) - self.stack_id = self.stack["stack"]["id"] - self.stack = self.scenario.clients( - "heat").stacks.get(self.stack_id) - self._wait(["CREATE_COMPLETE"], ["CREATE_FAILED"]) - - def update(self, data): - self.parameters.update(data) - with atomic.ActionTimer(self.scenario, "heat.update"): - self.scenario.clients("heat").stacks.update( - self.stack_id, template=self.template, - files=self.files, parameters=self.parameters) - self._wait(["UPDATE_COMPLETE"], ["UPDATE_FAILED"]) diff --git a/rally/plugins/openstack/services/identity/__init__.py b/rally/plugins/openstack/services/identity/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/services/identity/identity.py b/rally/plugins/openstack/services/identity/identity.py deleted file mode 100644 index b7661738c8..0000000000 --- a/rally/plugins/openstack/services/identity/identity.py +++ /dev/null @@ -1,247 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.task import service - - -Project = service.make_resource_cls("Project", ["id", "name", "domain_id"]) -User = service.make_resource_cls( - "User", properties=["id", "name", "project_id", "domain_id"]) -Service = service.make_resource_cls("Service", properties=["id", "name"]) -Role = service.make_resource_cls("Role", properties=["id", "name"]) - - -class Identity(service.UnifiedService): - @classmethod - def is_applicable(cls, clients): - cloud_version = clients.keystone().version.split(".")[0][1:] - return cloud_version == cls._meta_get("impl")._meta_get("version") - - @service.should_be_overridden - def create_project(self, project_name=None, domain_name="Default"): - """Creates new project/tenant and return project object. - - :param project_name: Name of project to be created. - :param domain_name: Name or id of domain where to create project, for - those service implementations that don't support - domains you should use None or 'Default' value. 
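A small aside on Identity.is_applicable above: it derives the major identity version by dropping the leading "v" from the keystoneclient version string. A one-line illustration with an assumed value:

.. code-block:: python

    # Same slicing as Identity.is_applicable; "v3.10" is an example string.
    version_string = "v3.10"
    major = version_string.split(".")[0][1:]
    assert major == "3"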
- """ - return self._impl.create_project(project_name, - domain_name=domain_name) - - @service.should_be_overridden - def update_project(self, project_id, name=None, enabled=None, - description=None): - """Update project name, enabled and description - - :param project_id: Id of project to update - :param name: project name to be set - :param enabled: enabled status of project - :param description: project description to be set - """ - self._impl.update_project(project_id, name=name, enabled=enabled, - description=description) - - @service.should_be_overridden - def delete_project(self, project_id): - """Deletes project.""" - return self._impl.delete_project(project_id) - - @service.should_be_overridden - def list_projects(self): - """List all projects.""" - return self._impl.list_projects() - - @service.should_be_overridden - def get_project(self, project_id): - """Get project.""" - return self._impl.get_project(project_id) - - @service.should_be_overridden - def create_user(self, username=None, password=None, project_id=None, - domain_name="Default", enabled=True, - default_role="member"): - """Create user. - - :param username: name of user - :param password: user password - :param project_id: user's default project - :param domain_name: Name or id of domain where to create user, for - those service implementations that don't support - domains you should use None or 'Default' value. - :param enabled: whether the user is enabled. - :param default_role: Name of role, for implementations that don't - support domains this argument must be None or - 'member'. - """ - return self._impl.create_user(username=username, - password=password, - project_id=project_id, - domain_name=domain_name, - default_role=default_role) - - @service.should_be_overridden - def create_users(self, owner_id, number_of_users, user_create_args=None): - """Create specified amount of users. - - :param owner_id: Id of tenant/project - :param number_of_users: number of users to create - :param user_create_args: additional user creation arguments - """ - return self._impl.create_users(owner_id, - number_of_users=number_of_users, - user_create_args=user_create_args) - - @service.should_be_overridden - def delete_user(self, user_id): - """Deletes user by its id.""" - self._impl.delete_user(user_id) - - @service.should_be_overridden - def list_users(self): - """List all users.""" - return self._impl.list_users() - - @service.should_be_overridden - def update_user(self, user_id, enabled=None, name=None, email=None, - password=None): - return self._impl.update_user(user_id, enabled=enabled, name=name, - email=email, password=password) - - @service.should_be_overridden - def get_user(self, user_id): - """Get user.""" - return self._impl.get_user(user_id) - - @service.should_be_overridden - def create_service(self, name=None, service_type=None, description=None): - """Creates keystone service with random name. 
- - :param name: name of service to create - :param service_type: type of the service - :param description: description of the service - """ - return self._impl.create_service(name=name, service_type=service_type, - description=description) - - @service.should_be_overridden - def delete_service(self, service_id): - """Deletes service.""" - self._impl.delete_service(service_id) - - @service.should_be_overridden - def list_services(self): - """List all services.""" - return self._impl.list_services() - - @service.should_be_overridden - def get_service(self, service_id): - """Get service.""" - return self._impl.get_service(service_id) - - @service.should_be_overridden - def create_role(self, name=None, domain_name=None): - """Create role with specific name - - :param name: role name - :param domain_name: Name or id of domain where to create role, for - those service implementations that don't support - domains you should use None or 'Default' value. - """ - return self._impl.create_role(name=name, domain_name=domain_name) - - @service.should_be_overridden - def add_role(self, role_id, user_id, project_id): - """Add role to user.""" - return self._impl.add_role(role_id=role_id, user_id=user_id, - project_id=project_id) - - @service.should_be_overridden - def delete_role(self, role_id): - """Deletes role.""" - self._impl.delete_role(role_id) - - @service.should_be_overridden - def revoke_role(self, role_id, user_id, project_id): - """Revokes a role from a user.""" - return self._impl.revoke_role(role_id=role_id, user_id=user_id, - project_id=project_id) - - @service.should_be_overridden - def list_roles(self, user_id=None, project_id=None, domain_name=None): - """List all roles. - - :param user_id: filter in role grants for the specified user on a - resource. Domain or project must be specified. - :param project_id: filter in role grants on the specified project. - user_id should be specified - :param domain_name: filter in role grants on the specified domain. - user_id should be specified - """ - return self._impl.list_roles(user_id=user_id, project_id=project_id, - domain_name=domain_name) - - @service.should_be_overridden - def get_role(self, role_id): - """Get role.""" - return self._impl.get_role(role_id) - - @service.should_be_overridden - def get_service_by_name(self, name): - """List all services to find proper one.""" - return self._impl.get_service_by_name(name) - - @service.should_be_overridden - def create_ec2credentials(self, user_id, project_id): - """Create ec2credentials. - - :param user_id: User ID for which to create credentials - :param project_id: Project ID for which to create credentials - - :returns: Created ec2-credentials object - """ - return self._impl.create_ec2credentials(user_id=user_id, - project_id=project_id) - - @service.should_be_overridden - def list_ec2credentials(self, user_id): - """List of access/secret pairs for a user_id. - - :param user_id: List all ec2-credentials for User ID - - :returns: Return ec2-credentials list - """ - return self._impl.list_ec2credentials(user_id) - - @service.should_be_overridden - def delete_ec2credential(self, user_id, access): - """Delete ec2credential. 
- - :param user_id: User ID for which to delete credential - :param access: access key for ec2credential to delete - """ - return self._impl.delete_ec2credential(user_id=user_id, access=access) - - @service.should_be_overridden - def fetch_token(self): - """Authenticate user token.""" - return self._impl.fetch_token() - - @service.should_be_overridden - def validate_token(self, token): - """Validate user token. - - :param token: Auth token to validate - """ - return self._impl.validate_token(token) diff --git a/rally/plugins/openstack/services/identity/keystone_common.py b/rally/plugins/openstack/services/identity/keystone_common.py deleted file mode 100644 index 2c2b2e9ccf..0000000000 --- a/rally/plugins/openstack/services/identity/keystone_common.py +++ /dev/null @@ -1,192 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.plugins.openstack import osclients -from rally.plugins.openstack.services.identity import identity -from rally.task import atomic - - -class UnifiedKeystoneMixin(object): - @staticmethod - def _unify_service(service): - return identity.Service(id=service.id, name=service.name) - - @staticmethod - def _unify_role(role): - return identity.Role(id=role.id, name=role.name) - - def delete_user(self, user_id): - """Deletes user by its id.""" - return self._impl.delete_user(user_id) - - def get_user(self, user_id): - """Get user.""" - return self._unify_user(self._impl.get_user(user_id)) - - def create_service(self, name=None, service_type=None, description=None): - """Creates keystone service.""" - - return self._unify_service(self._impl.create_service( - name=name, service_type=service_type, description=description)) - - def delete_service(self, service_id): - """Deletes service.""" - return self._impl.delete_service(service_id) - - def get_service(self, service_id): - """Get service.""" - return self._unify_service(self._impl.get_service(service_id)) - - def get_service_by_name(self, name): - """List all services to find proper one.""" - return self._unify_service(self._impl.get_service_by_name(name)) - - def get_role(self, role_id): - """Get role.""" - return self._unify_role(self._impl.get_role(role_id)) - - def delete_role(self, role_id): - """Deletes role.""" - return self._impl.delete_role(role_id) - - def list_ec2credentials(self, user_id): - """List of access/secret pairs for a user_id. - - :param user_id: List all ec2-credentials for User ID - - :returns: Return ec2-credentials list - """ - return self._impl.list_ec2credentials(user_id) - - def delete_ec2credential(self, user_id, access): - """Delete ec2credential. - - :param user_id: User ID for which to delete credential - :param access: access key for ec2credential to delete - """ - return self._impl.delete_ec2credential(user_id=user_id, access=access) - - def fetch_token(self): - """Authenticate user token.""" - return self._impl.fetch_token() - - def validate_token(self, token): - """Validate user token. 
- - :param token: Auth token to validate - """ - return self._impl.validate_token(token) - - -class KeystoneMixin(object): - - def list_users(self): - aname = "keystone_v%s.list_users" % self.version - with atomic.ActionTimer(self, aname): - return self._clients.keystone(self.version).users.list() - - def delete_user(self, user_id): - """Deletes user by its id.""" - aname = "keystone_v%s.delete_user" % self.version - with atomic.ActionTimer(self, aname): - self._clients.keystone(self.version).users.delete(user_id) - - def get_user(self, user_id): - """Get user by its id.""" - aname = "keystone_v%s.get_user" % self.version - with atomic.ActionTimer(self, aname): - return self._clients.keystone(self.version).users.get(user_id) - - def delete_service(self, service_id): - """Deletes service.""" - aname = "keystone_v%s.delete_service" % self.version - with atomic.ActionTimer(self, aname): - self._clients.keystone(self.version).services.delete(service_id) - - def list_services(self): - """List all services.""" - aname = "keystone_v%s.list_services" % self.version - with atomic.ActionTimer(self, aname): - return self._clients.keystone(self.version).services.list() - - def get_service(self, service_id): - """Get service.""" - aname = "keystone_v%s.get_services" % self.version - with atomic.ActionTimer(self, aname): - return self._clients.keystone(self.version).services.get( - service_id) - - def get_service_by_name(self, name): - """List all services to find proper one.""" - for s in self.list_services(): - if s.name == name: - return s - - def delete_role(self, role_id): - """Deletes role.""" - aname = "keystone_v%s.delete_role" % self.version - with atomic.ActionTimer(self, aname): - self._clients.keystone(self.version).roles.delete(role_id) - - def list_roles(self): - """List all roles.""" - aname = "keystone_v%s.list_roles" % self.version - with atomic.ActionTimer(self, aname): - return self._clients.keystone(self.version).roles.list() - - def get_role(self, role_id): - """Get role.""" - aname = "keystone_v%s.get_role" % self.version - with atomic.ActionTimer(self, aname): - return self._clients.keystone(self.version).roles.get(role_id) - - def list_ec2credentials(self, user_id): - """List of access/secret pairs for a user_id. - - :param user_id: List all ec2-credentials for User ID - - :returns: Return ec2-credentials list - """ - aname = "keystone_v%s.list_ec2creds" % self.version - with atomic.ActionTimer(self, aname): - return self._clients.keystone(self.version).ec2.list(user_id) - - def delete_ec2credential(self, user_id, access): - """Delete ec2credential. - - :param user_id: User ID for which to delete credential - :param access: access key for ec2credential to delete - """ - aname = "keystone_v%s.delete_ec2creds" % self.version - with atomic.ActionTimer(self, aname): - self._clients.keystone(self.version).ec2.delete(user_id=user_id, - access=access) - - def fetch_token(self): - """Authenticate user token.""" - cred = self._clients.credential - aname = "keystone_v%s.fetch_token" % self.version - with atomic.ActionTimer(self, aname): - clients = osclients.Clients(credential=cred, - api_info=self._clients.api_info) - return clients.keystone.auth_ref.auth_token - - def validate_token(self, token): - """Validate user token. 
- - :param token: Auth token to validate - """ - aname = "keystone_v%s.validate_token" % self.version - with atomic.ActionTimer(self, aname): - self._clients.keystone(self.version).tokens.validate(token) diff --git a/rally/plugins/openstack/services/identity/keystone_v2.py b/rally/plugins/openstack/services/identity/keystone_v2.py deleted file mode 100644 index 6212560e42..0000000000 --- a/rally/plugins/openstack/services/identity/keystone_v2.py +++ /dev/null @@ -1,316 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import uuid - -from rally.plugins.openstack import service -from rally.plugins.openstack.services.identity import identity -from rally.plugins.openstack.services.identity import keystone_common -from rally.task import atomic - - -@service.service("keystone", service_type="identity", version="2") -class KeystoneV2Service(service.Service, keystone_common.KeystoneMixin): - - @atomic.action_timer("keystone_v2.create_tenant") - def create_tenant(self, tenant_name=None): - tenant_name = tenant_name or self.generate_random_name() - return self._clients.keystone("2").tenants.create(tenant_name) - - @atomic.action_timer("keystone_v2.update_tenant") - def update_tenant(self, tenant_id, name=None, enabled=None, - description=None): - """Update tenant name and description. - - :param tenant_id: Id of tenant to update - :param name: tenant name to be set (if boolean True, random name will - be set) - :param enabled: enabled status of project - :param description: tenant description to be set (if boolean True, - random description will be set) - """ - if name is True: - name = self.generate_random_name() - if description is True: - description = self.generate_random_name() - self._clients.keystone("2").tenants.update( - tenant_id, name=name, description=description, enabled=enabled) - - @atomic.action_timer("keystone_v2.delete_tenant") - def delete_tenant(self, tenant_id): - return self._clients.keystone("2").tenants.delete(tenant_id) - - @atomic.action_timer("keystone_v2.list_tenants") - def list_tenants(self): - return self._clients.keystone("2").tenants.list() - - @atomic.action_timer("keystone_v2.get_tenant") - def get_tenant(self, tenant_id): - """Get tenant.""" - return self._clients.keystone("2").tenants.get(tenant_id) - - @atomic.action_timer("keystone_v2.create_user") - def create_user(self, username=None, password=None, email=None, - tenant_id=None, enabled=True): - username = username or self.generate_random_name() - password = password or str(uuid.uuid4()) - email = email or (username + "@rally.me") - return self._clients.keystone("2").users.create(name=username, - password=password, - email=email, - tenant_id=tenant_id, - enabled=enabled) - - @atomic.action_timer("keystone_v2.create_users") - def create_users(self, tenant_id, number_of_users, user_create_args=None): - """Create specified amount of users. 
- - :param tenant_id: Id of tenant - :param number_of_users: number of users to create - :param user_create_args: additional user creation arguments - """ - users = [] - for _i in range(number_of_users): - users.append(self.create_user(tenant_id=tenant_id, - **(user_create_args or {}))) - return users - - @atomic.action_timer("keystone_v2.update_user") - def update_user(self, user_id, **kwargs): - allowed_args = ("name", "email", "enabled") - restricted = set(kwargs) - set(allowed_args) - if restricted: - raise NotImplementedError( - "Failed to update '%s', since Keystone V2 allows to update " - "only '%s'." % ("', '".join(restricted), - "', '".join(allowed_args))) - self._clients.keystone("2").users.update(user_id, **kwargs) - - @atomic.action_timer("keystone_v2.update_user_password") - def update_user_password(self, user_id, password): - self._clients.keystone("2").users.update_password(user_id, - password=password) - - @atomic.action_timer("keystone_v2.create_service") - def create_service(self, name=None, service_type=None, description=None): - """Creates keystone service. - - :param name: name of service to create - :param service_type: type of the service - :param description: description of the service - :returns: keystone service instance - """ - name = name or self.generate_random_name() - service_type = service_type or "rally_test_type" - description = description or self.generate_random_name() - return self._clients.keystone("2").services.create( - name, - service_type=service_type, - description=description) - - @atomic.action_timer("keystone_v2.create_role") - def create_role(self, name=None): - name = name or self.generate_random_name() - return self._clients.keystone("2").roles.create(name) - - @atomic.action_timer("keystone_v2.add_role") - def add_role(self, role_id, user_id, tenant_id): - self._clients.keystone("2").roles.add_user_role( - user=user_id, role=role_id, tenant=tenant_id) - - @atomic.action_timer("keystone_v2.list_roles") - def list_roles(self): - """List all roles.""" - return self._clients.keystone("2").roles.list() - - @atomic.action_timer("keystone_v2.list_roles_for_user") - def list_roles_for_user(self, user_id, tenant_id=None): - return self._clients.keystone("2").roles.roles_for_user( - user_id, tenant_id) - - @atomic.action_timer("keystone_v2.revoke_role") - def revoke_role(self, role_id, user_id, tenant_id): - self._clients.keystone("2").roles.remove_user_role(user=user_id, - role=role_id, - tenant=tenant_id) - - @atomic.action_timer("keystone_v2.create_ec2creds") - def create_ec2credentials(self, user_id, tenant_id): - """Create ec2credentials. 
- - :param user_id: User ID for which to create credentials - :param tenant_id: Tenant ID for which to create credentials - - :returns: Created ec2-credentials object - """ - return self._clients.keystone("2").ec2.create(user_id, - tenant_id=tenant_id) - - -@service.compat_layer(KeystoneV2Service) -class UnifiedKeystoneV2Service(keystone_common.UnifiedKeystoneMixin, - identity.Identity): - """Compatibility layer for Keystone V2.""" - - @staticmethod - def _check_domain(domain_name): - if domain_name.lower() != "default": - raise NotImplementedError("Domain functionality not implemented " - "in Keystone v2") - - @staticmethod - def _unify_tenant(tenant): - return identity.Project(id=tenant.id, name=tenant.name, - domain_id="default") - - @staticmethod - def _unify_user(user): - return identity.User(id=user.id, name=user.name, - project_id=getattr(user, "tenantId", None), - domain_id="default") - - def create_project(self, project_name=None, domain_name="Default"): - """Creates new project/tenant and return project object. - - :param project_name: Name of project to be created. - :param domain_name: Restricted for Keystone V2. Should not be set or - "Default" is expected. - """ - self._check_domain(domain_name) - tenant = self._impl.create_tenant(project_name) - return self._unify_tenant(tenant) - - def update_project(self, project_id, name=None, enabled=None, - description=None): - """Update project name, enabled and description - - :param project_id: Id of project to update - :param name: project name to be set - :param enabled: enabled status of project - :param description: project description to be set - """ - self._impl.update_tenant(tenant_id=project_id, name=name, - enabled=enabled, description=description) - - def delete_project(self, project_id): - """Deletes project.""" - return self._impl.delete_tenant(project_id) - - def list_projects(self): - """List all projects.""" - return [self._unify_tenant(t) for t in self._impl.list_tenants()] - - def get_project(self, project_id): - """Get project.""" - return self._unify_tenant(self._impl.get_tenant(project_id)) - - def create_user(self, username=None, password=None, project_id=None, - domain_name="Default", enabled=True, - default_role="member"): - """Create user. - - :param username: name of user - :param password: user password - :param project_id: user's default project - :param domain_name: Restricted for Keystone V2. Should not be set or - "Default" is expected. - :param enabled: whether the user is enabled. - :param default_role: Restricted for Keystone V2. Should not be set or - "member" is expected. - """ - self._check_domain(domain_name) - user = self._impl.create_user(username=username, - password=password, - tenant_id=project_id, - enabled=enabled) - return self._unify_user(user) - - def create_users(self, tenant_id, number_of_users, user_create_args=None): - """Create specified amount of users. 
- - :param tenant_id: Id of tenant - :param number_of_users: number of users to create - :param user_create_args: additional user creation arguments - """ - if user_create_args and "domain_name" in user_create_args: - self._check_domain(user_create_args["domain_name"]) - return [self._unify_user(u) - for u in self._impl.create_users( - tenant_id=tenant_id, number_of_users=number_of_users, - user_create_args=user_create_args)] - - def list_users(self): - """List all users.""" - return [self._unify_user(u) for u in self._impl.list_users()] - - def update_user(self, user_id, enabled=None, name=None, email=None, - password=None): - if password is not None: - self._impl.update_user_password(user_id=user_id, password=password) - - update_args = {} - if enabled is not None: - update_args["enabled"] = enabled - if name is not None: - update_args["name"] = name - if email is not None: - update_args["email"] = email - - if update_args: - self._impl.update_user(user_id, **update_args) - - def list_services(self): - """List all services.""" - return [self._unify_service(s) for s in self._impl.list_services()] - - def create_role(self, name=None, domain_name=None): - """Add role to user.""" - if domain_name is not None: - raise NotImplementedError("Domain functionality not implemented " - "in Keystone v2") - - return self._unify_role(self._impl.create_role(name)) - - def add_role(self, role_id, user_id, project_id): - """Add role to user.""" - self._impl.add_role(role_id=role_id, user_id=user_id, - tenant_id=project_id) - - def revoke_role(self, role_id, user_id, project_id): - """Revokes a role from a user.""" - return self._impl.revoke_role(role_id=role_id, user_id=user_id, - tenant_id=project_id) - - def list_roles(self, user_id=None, project_id=None, domain_name=None): - """List all roles.""" - if domain_name: - raise NotImplementedError("Domain functionality not implemented " - "in Keystone v2") - if user_id: - roles = self._impl.list_roles_for_user(user_id, - tenant_id=project_id) - else: - roles = self._impl.list_roles() - return [self._unify_role(role) for role in roles] - - def create_ec2credentials(self, user_id, project_id): - """Create ec2credentials. - - :param user_id: User ID for which to create credentials - :param project_id: Project ID for which to create credentials - - :returns: Created ec2-credentials object - """ - return self._impl.create_ec2credentials(user_id=user_id, - tenant_id=project_id) diff --git a/rally/plugins/openstack/services/identity/keystone_v3.py b/rally/plugins/openstack/services/identity/keystone_v3.py deleted file mode 100644 index e2bce6582e..0000000000 --- a/rally/plugins/openstack/services/identity/keystone_v3.py +++ /dev/null @@ -1,340 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from rally.common import logging -from rally import exceptions -from rally.plugins.openstack import service -from rally.plugins.openstack.services.identity import identity -from rally.plugins.openstack.services.identity import keystone_common -from rally.task import atomic - - -LOG = logging.getLogger(__name__) - - -@service.service("keystone", service_type="identity", version="3") -class KeystoneV3Service(service.Service, keystone_common.KeystoneMixin): - - def _get_domain_id(self, domain_name_or_id): - from keystoneclient import exceptions as kc_exceptions - - try: - # First try to find domain by ID - return self._clients.keystone("3").domains.get( - domain_name_or_id).id - except kc_exceptions.NotFound: - # Domain not found by ID, try to find it by name - domains = self._clients.keystone("3").domains.list( - name=domain_name_or_id) - if domains: - return domains[0].id - # Domain not found by name - raise exceptions.GetResourceNotFound( - resource="KeystoneDomain(%s)" % domain_name_or_id) - - @atomic.action_timer("keystone_v3.create_project") - def create_project(self, project_name=None, domain_name="Default"): - project_name = project_name or self.generate_random_name() - domain_id = self._get_domain_id(domain_name) - return self._clients.keystone("3").projects.create(name=project_name, - domain=domain_id) - - @atomic.action_timer("keystone_v3.update_project") - def update_project(self, project_id, name=None, enabled=None, - description=None): - """Update tenant name and description. - - :param project_id: Id of project to update - :param name: project name to be set (if boolean True, random name will - be set) - :param enabled: enabled status of project - :param description: project description to be set (if boolean True, - random description will be set) - """ - if name is True: - name = self.generate_random_name() - if description is True: - description = self.generate_random_name() - self._clients.keystone("3").projects.update( - project_id, name=name, description=description, enabled=enabled) - - @atomic.action_timer("keystone_v3.delete_project") - def delete_project(self, project_id): - self._clients.keystone("3").projects.delete(project_id) - - @atomic.action_timer("keystone_v3.list_projects") - def list_projects(self): - return self._clients.keystone("3").projects.list() - - @atomic.action_timer("keystone_v3.get_project") - def get_project(self, project_id): - """Get project.""" - return self._clients.keystone("3").projects.get(project_id) - - @atomic.action_timer("keystone_v3.create_user") - def create_user(self, username=None, password=None, project_id=None, - domain_name="Default", enabled=True, - default_role="member"): - """Create user. - - - :param username: name of user - :param password: user password - :param project_id: user's default project - :param domain_name: Name or id of domain where to create project. - :param enabled: whether the user is enabled. 
- :param default_role: user's default role - """ - domain_id = self._get_domain_id(domain_name) - username = username or self.generate_random_name() - user = self._clients.keystone("3").users.create( - name=username, password=password, default_project=project_id, - domain=domain_id, enabled=enabled) - - if project_id: - # we can't setup role without project_id - roles = self.list_roles() - for role in roles: - if default_role == role.name.lower(): - self.add_role(role_id=role.id, - user_id=user.id, - project_id=project_id) - return user - for role in roles: - if default_role == role.name.lower().strip("_"): - self.add_role(role_id=role.id, - user_id=user.id, - project_id=project_id) - return user - - LOG.warning("Unable to set %s role to created user." % - default_role) - return user - - @atomic.action_timer("keystone_v3.create_users") - def create_users(self, project_id, number_of_users, user_create_args=None): - """Create specified amount of users. - - :param project_id: Id of project - :param number_of_users: number of users to create - :param user_create_args: additional user creation arguments - """ - users = [] - for _i in range(number_of_users): - users.append(self.create_user(project_id=project_id, - **(user_create_args or {}))) - return users - - @atomic.action_timer("keystone_v3.update_user") - def update_user(self, user_id, name=None, domain_name=None, - project_id=None, password=None, email=None, - description=None, enabled=None, default_project=None): - domain = None - if domain_name: - domain = self._get_domain_id(domain_name) - - self._clients.keystone("3").users.update( - user_id, name=name, domain=domain, project=project_id, - password=password, email=email, description=description, - enabled=enabled, default_project=default_project) - - @atomic.action_timer("keystone_v3.create_service") - def create_service(self, name=None, service_type=None, description=None, - enabled=True): - """Creates keystone service. 
- - :param name: name of service to create - :param service_type: type of the service - :param description: description of the service - :param enabled: whether the service appears in the catalog - :returns: keystone service instance - """ - name = name or self.generate_random_name() - service_type = service_type or "rally_test_type" - description = description or self.generate_random_name() - return self._clients.keystone("3").services.create( - name, type=service_type, description=description, enabled=enabled) - - @atomic.action_timer("keystone_v3.create_role") - def create_role(self, name=None, domain_name=None): - domain_id = None - if domain_name: - domain_id = self._get_domain_id(domain_name) - name = name or self.generate_random_name() - return self._clients.keystone("3").roles.create(name, domain=domain_id) - - @atomic.action_timer("keystone_v3.add_role") - def add_role(self, role_id, user_id, project_id): - self._clients.keystone("3").roles.grant(role=role_id, - user=user_id, - project=project_id) - - @atomic.action_timer("keystone_v3.list_roles") - def list_roles(self, user_id=None, project_id=None, domain_name=None): - """List all roles.""" - domain_id = None - if domain_name: - domain_id = self._get_domain_id(domain_name) - return self._clients.keystone("3").roles.list(user=user_id, - project=project_id, - domain=domain_id) - - @atomic.action_timer("keystone_v3.revoke_role") - def revoke_role(self, role_id, user_id, project_id): - self._clients.keystone("3").roles.revoke(role=role_id, - user=user_id, - project=project_id) - - @atomic.action_timer("keystone_v3.create_domain") - def create_domain(self, name, description=None, enabled=True): - return self._clients.keystone("3").domains.create( - name, description=description, enabled=enabled) - - @atomic.action_timer("keystone_v3.create_ec2creds") - def create_ec2credentials(self, user_id, project_id): - """Create ec2credentials. - - :param user_id: User ID for which to create credentials - :param project_id: Tenant ID for which to create credentials - - :returns: Created ec2-credentials object - """ - return self._clients.keystone("3").ec2.create(user_id, - project_id=project_id) - - -@service.compat_layer(KeystoneV3Service) -class UnifiedKeystoneV3Service(keystone_common.UnifiedKeystoneMixin, - identity.Identity): - - @staticmethod - def _unify_project(project): - return identity.Project(id=project.id, name=project.name, - domain_id=project.domain_id) - - @staticmethod - def _unify_user(user): - # When user has default_project_id that is None user.default_project_id - # will raise AttributeError - project_id = getattr(user, "project_id", - getattr(user, "default_project_id", None)) - return identity.User(id=user.id, name=user.name, project_id=project_id, - domain_id=user.domain_id) - - def create_project(self, project_name=None, domain_name="Default"): - """Creates new project/tenant and return project object. - - :param project_name: Name of project to be created. 
- :param domain_name: Name or id of domain where to create project, - """ - project = self._impl.create_project(project_name, - domain_name=domain_name) - return self._unify_project(project) - - def update_project(self, project_id, name=None, enabled=None, - description=None): - """Update project name, enabled and description - - :param project_id: Id of project to update - :param name: project name to be set - :param enabled: enabled status of project - :param description: project description to be set - """ - self._impl.update_project(project_id=project_id, name=name, - enabled=enabled, description=description) - - def delete_project(self, project_id): - """Deletes project.""" - return self._impl.delete_project(project_id) - - def list_projects(self): - """List all projects.""" - return [self._unify_project(p) for p in self._impl.list_projects()] - - def get_project(self, project_id): - """Get project.""" - return self._unify_project(self._impl.get_project(project_id)) - - def create_user(self, username=None, password=None, project_id=None, - domain_name="Default", enabled=True, - default_role="member"): - """Create user. - - :param username: name of user - :param password: user password - :param project_id: user's default project - :param domain_name: Name or id of domain where to create project, - :param enabled: whether the user is enabled. - :param default_role: Name of default user's role - """ - return self._unify_user(self._impl.create_user( - username=username, password=password, project_id=project_id, - domain_name=domain_name, default_role=default_role, - enabled=enabled)) - - def create_users(self, project_id, number_of_users, user_create_args=None): - """Create specified amount of users. - - :param project_id: Id of project - :param number_of_users: number of users to create - :param user_create_args: additional user creation arguments - """ - return [self._unify_user(u) - for u in self._impl.create_users( - project_id=project_id, number_of_users=number_of_users, - user_create_args=user_create_args)] - - def list_users(self): - """List all users.""" - return [self._unify_user(u) for u in self._impl.list_users()] - - def update_user(self, user_id, enabled=None, name=None, email=None, - password=None): - return self._impl.update_user(user_id, enabled=enabled, name=name, - email=email, password=password) - - def list_services(self): - """List all services.""" - return [self._unify_service(s) for s in self._impl.list_services()] - - def create_role(self, name=None, domain_name=None): - """Add role to user.""" - return self._unify_role(self._impl.create_role( - name, domain_name=domain_name)) - - def add_role(self, role_id, user_id, project_id): - """Add role to user.""" - self._impl.add_role(role_id=role_id, user_id=user_id, - project_id=project_id) - - def revoke_role(self, role_id, user_id, project_id): - """Revokes a role from a user.""" - return self._impl.revoke_role(role_id=role_id, user_id=user_id, - project_id=project_id) - - def list_roles(self, user_id=None, project_id=None, domain_name=None): - """List all roles.""" - return [self._unify_role(role) for role in self._impl.list_roles( - user_id=user_id, project_id=project_id, domain_name=domain_name)] - - def create_ec2credentials(self, user_id, project_id): - """Create ec2credentials. 
- - :param user_id: User ID for which to create credentials - :param project_id: Project ID for which to create credentials - - :returns: Created ec2-credentials object - """ - return self._impl.create_ec2credentials(user_id=user_id, - project_id=project_id) diff --git a/rally/plugins/openstack/services/image/__init__.py b/rally/plugins/openstack/services/image/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/services/image/glance_common.py b/rally/plugins/openstack/services/image/glance_common.py deleted file mode 100644 index c41ca63f54..0000000000 --- a/rally/plugins/openstack/services/image/glance_common.py +++ /dev/null @@ -1,92 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from glanceclient import exc as glance_exc - -from rally import exceptions -from rally.plugins.openstack.services.image import image as image_service -from rally.task import atomic - - -class GlanceMixin(object): - - def _get_client(self): - return self._clients.glance(self.version) - - def get_image(self, image): - """Get specified image. - - :param image: ID or object with ID of image to obtain. - """ - image_id = getattr(image, "id", image) - try: - aname = "glance_v%s.get_image" % self.version - with atomic.ActionTimer(self, aname): - return self._get_client().images.get(image_id) - except glance_exc.HTTPNotFound: - raise exceptions.GetResourceNotFound(resource=image) - - def delete_image(self, image_id): - """Delete image.""" - aname = "glance_v%s.delete_image" % self.version - with atomic.ActionTimer(self, aname): - self._get_client().images.delete(image_id) - - def download_image(self, image_id, do_checksum=True): - """Retrieve data of an image. - - :param image_id: ID of the image to download. - :param do_checksum: Enable/disable checksum validation. - :returns: An iterable body or None - """ - aname = "glance_v%s.download_image" % self.version - with atomic.ActionTimer(self, aname): - return self._get_client().images.data(image_id, - do_checksum=do_checksum) - - -class UnifiedGlanceMixin(object): - - @staticmethod - def _unify_image(image): - if hasattr(image, "visibility"): - return image_service.UnifiedImage(id=image.id, name=image.name, - status=image.status, - visibility=image.visibility) - else: - return image_service.UnifiedImage( - id=image.id, name=image.name, - status=image.status, - visibility=("public" if image.is_public else "private")) - - def get_image(self, image): - """Get specified image. - - :param image: ID or object with ID of image to obtain. - """ - image_obj = self._impl.get_image(image=image) - return self._unify_image(image_obj) - - def delete_image(self, image_id): - """Delete image.""" - self._impl.delete_image(image_id=image_id) - - def download_image(self, image_id, do_checksum=True): - """Download data for an image. 
- - :param image_id: image id to look up - :param do_checksum: Enable/disable checksum validation - :rtype: iterable containing image data or None - """ - return self._impl.download_image(image_id, do_checksum=do_checksum) diff --git a/rally/plugins/openstack/services/image/glance_v1.py b/rally/plugins/openstack/services/image/glance_v1.py deleted file mode 100644 index a7f979ed94..0000000000 --- a/rally/plugins/openstack/services/image/glance_v1.py +++ /dev/null @@ -1,209 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from rally.common import cfg -from rally.common import utils as rutils -from rally.plugins.openstack import service -from rally.plugins.openstack.services.image import glance_common -from rally.plugins.openstack.services.image import image -from rally.task import atomic -from rally.task import utils - -CONF = cfg.CONF - - -@service.service("glance", service_type="image", version="1") -class GlanceV1Service(service.Service, glance_common.GlanceMixin): - - @atomic.action_timer("glance_v1.create_image") - def create_image(self, image_name=None, container_format=None, - image_location=None, disk_format=None, - is_public=True, min_disk=0, min_ram=0, - properties=None): - """Creates new image. - - :param image_name: Image name for which need to be created - :param container_format: Container format - :param image_location: The new image's location - :param disk_format: Disk format - :param is_public: The created image's public status - :param min_disk: The min disk of created images - :param min_ram: The min ram of created images - :param properties: Dict of image properties - """ - image_location = os.path.expanduser(image_location) - image_name = image_name or self.generate_random_name() - kwargs = {} - - try: - if os.path.isfile(image_location): - kwargs["data"] = open(image_location) - else: - kwargs["copy_from"] = image_location - - image_obj = self._clients.glance("1").images.create( - name=image_name, - container_format=container_format, - disk_format=disk_format, - is_public=is_public, - min_disk=min_disk, - min_ram=min_ram, - properties=properties, - **kwargs) - - rutils.interruptable_sleep(CONF.openstack. - glance_image_create_prepoll_delay) - - image_obj = utils.wait_for_status( - image_obj, ["active"], - update_resource=self.get_image, - timeout=CONF.openstack.glance_image_create_timeout, - check_interval=CONF.openstack.glance_image_create_poll_interval - ) - - finally: - if "data" in kwargs: - kwargs["data"].close() - - return image_obj - - @atomic.action_timer("glance_v1.update_image") - def update_image(self, image_id, image_name=None, min_disk=0, - min_ram=0): - """Update image. 
- - :param image_id: ID of image to update - :param image_name: Image name to be updated to - :param min_disk: The min disk of updated image - :param min_ram: The min ram of updated image - """ - image_name = image_name or self.generate_random_name() - - return self._clients.glance("1").images.update(image_id, - name=image_name, - min_disk=min_disk, - min_ram=min_ram) - - @atomic.action_timer("glance_v1.list_images") - def list_images(self, status="active", is_public=None, owner=None): - """List images. - - :param status: Filter in images for the specified status - :param is_public: Filter in images for the specified public status - :param owner: Filter in images for tenant ID - """ - # NOTE(boris-42): image.list() is lazy method which doesn't query API - # until it's used, do not remove list(). - return list(self._clients.glance("1").images.list(status=status, - owner=owner, - is_public=is_public)) - - @atomic.action_timer("glance_v1.set_visibility") - def set_visibility(self, image_id, is_public=True): - """Update visibility. - - :param image_id: ID of image to update - :param is_public: Image is public or not - """ - self._clients.glance("1").images.update(image_id, is_public=is_public) - - -@service.compat_layer(GlanceV1Service) -class UnifiedGlanceV1Service(glance_common.UnifiedGlanceMixin, image.Image): - """Compatibility layer for Glance V1.""" - - @staticmethod - def _check_v1_visibility(visibility): - visibility_values = ["public", "private"] - if visibility and visibility not in visibility_values: - raise image.VisibilityException( - message="Improper visibility value: %s in glance_v1" - % visibility) - - def create_image(self, image_name=None, container_format=None, - image_location=None, disk_format=None, - visibility="public", min_disk=0, - min_ram=0, properties=None): - """Creates new image. - - :param image_name: Image name for which need to be created - :param container_format: Container format - :param image_location: The new image's location - :param disk_format: Disk format - :param visibility: The created image's visible status - :param min_disk: The min disk of created images - :param min_ram: The min ram of created images - :param properties: Dict of image properties - """ - self._check_v1_visibility(visibility) - - is_public = visibility != "private" - image_obj = self._impl.create_image( - image_name=image_name, - container_format=container_format, - image_location=image_location, - disk_format=disk_format, - is_public=is_public, - min_disk=min_disk, - min_ram=min_ram, - properties=properties) - return self._unify_image(image_obj) - - def update_image(self, image_id, image_name=None, min_disk=0, - min_ram=0, remove_props=None): - """Update image. - - :param image_id: ID of image to update - :param image_name: Image name to be updated to - :param min_disk: The min disk of updated image - :param min_ram: The min ram of updated image - :param remove_props: List of property names to remove - """ - if remove_props is not None: - raise image.RemovePropsException("Remove prop: %s is not" - "supported in" - "glance_v1" % remove_props) - image_obj = self._impl.update_image( - image_id=image_id, - image_name=image_name, - min_disk=min_disk, - min_ram=min_ram) - return self._unify_image(image_obj) - - def list_images(self, status="active", visibility=None, owner=None): - """List images. 
- - :param status: Filter in images for the specified status - :param visibility: Filter in images for the specified visibility - :param owner: Filter in images for tenant ID - """ - self._check_v1_visibility(visibility) - - is_public = visibility != "private" - - images = self._impl.list_images(status=status, is_public=is_public) - return [self._unify_image(i) for i in images] - - def set_visibility(self, image_id, visibility="public"): - """Update visibility. - - :param image_id: ID of image to update - :param visibility: The visibility of specified image - """ - self._check_v1_visibility(visibility) - - is_public = visibility != "private" - self._impl.set_visibility(image_id=image_id, is_public=is_public) diff --git a/rally/plugins/openstack/services/image/glance_v2.py b/rally/plugins/openstack/services/image/glance_v2.py deleted file mode 100644 index 575e2c36d9..0000000000 --- a/rally/plugins/openstack/services/image/glance_v2.py +++ /dev/null @@ -1,241 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import time - -import requests - -from rally.common import cfg -from rally.common import utils as rutils -from rally.plugins.openstack import service -from rally.plugins.openstack.services.image import glance_common -from rally.plugins.openstack.services.image import image -from rally.task import atomic -from rally.task import utils - -CONF = cfg.CONF - - -@service.service("glance", service_type="image", version="2") -class GlanceV2Service(service.Service, glance_common.GlanceMixin): - - @atomic.action_timer("glance_v2.upload_data") - def upload_data(self, image_id, image_location): - """Upload the data for an image. - - :param image_id: Image ID to upload data to. - :param image_location: Location of the data to upload to. - """ - image_location = os.path.expanduser(image_location) - image_data = None - response = None - try: - if os.path.isfile(image_location): - image_data = open(image_location) - else: - response = requests.get(image_location, stream=True) - image_data = response.raw - self._clients.glance("2").images.upload(image_id, image_data) - finally: - if image_data is not None: - image_data.close() - if response is not None: - response.close() - - @atomic.action_timer("glance_v2.create_image") - def create_image(self, image_name=None, container_format=None, - image_location=None, disk_format=None, - visibility=None, min_disk=0, - min_ram=0, properties=None): - """Creates new image. - - :param image_name: Image name for which need to be created - :param container_format: Container format - :param image_location: The new image's location - :param disk_format: Disk format - :param visibility: The created image's visible status. 
- :param min_disk: The min disk of created images - :param min_ram: The min ram of created images - :param properties: Dict of image properties - """ - image_name = image_name or self.generate_random_name() - - properties = properties or {} - image_obj = self._clients.glance("2").images.create( - name=image_name, - container_format=container_format, - disk_format=disk_format, - visibility=visibility, - min_disk=min_disk, - min_ram=min_ram, - **properties) - - rutils.interruptable_sleep(CONF.openstack. - glance_image_create_prepoll_delay) - - start = time.time() - image_obj = utils.wait_for_status( - image_obj.id, ["queued"], - update_resource=self.get_image, - timeout=CONF.openstack.glance_image_create_timeout, - check_interval=CONF.openstack.glance_image_create_poll_interval) - timeout = time.time() - start - - self.upload_data(image_obj.id, image_location=image_location) - - image_obj = utils.wait_for_status( - image_obj, ["active"], - update_resource=self.get_image, - timeout=timeout, - check_interval=CONF.openstack.glance_image_create_poll_interval) - return image_obj - - @atomic.action_timer("glance_v2.update_image") - def update_image(self, image_id, image_name=None, min_disk=0, - min_ram=0, remove_props=None): - """Update image. - - :param image_id: ID of image to update - :param image_name: Image name to be updated to - :param min_disk: The min disk of updated image - :param min_ram: The min ram of updated image - :param remove_props: List of property names to remove - """ - image_name = image_name or self.generate_random_name() - - return self._clients.glance("2").images.update( - image_id=image_id, - name=image_name, - min_disk=min_disk, - min_ram=min_ram, - remove_props=remove_props) - - @atomic.action_timer("glance_v2.list_images") - def list_images(self, status="active", visibility=None, owner=None): - """List images. - - :param status: Filter in images for the specified status - :param visibility: Filter in images for the specified visibility - :param owner: Filter in images for tenant ID - """ - filters = {} - filters["status"] = status - if visibility: - filters["visibility"] = visibility - if owner: - filters["owner"] = owner - # NOTE(boris-42): image.list() is lazy method which doesn't query API - # until it's used, do not remove list(). - return list(self._clients.glance("2").images.list(filters=filters)) - - @atomic.action_timer("glance_v2.set_visibility") - def set_visibility(self, image_id, visibility="shared"): - """Update visibility. 
- - :param image_id: ID of image to update - :param visibility: The visibility of specified image - """ - self._clients.glance("2").images.update(image_id, - visibility=visibility) - - @atomic.action_timer("glance_v2.deactivate_image") - def deactivate_image(self, image_id): - """deactivate image.""" - self._clients.glance("2").images.deactivate(image_id) - - @atomic.action_timer("glance_v2.reactivate_image") - def reactivate_image(self, image_id): - """reactivate image.""" - self._clients.glance("2").images.reactivate(image_id) - - -@service.compat_layer(GlanceV2Service) -class UnifiedGlanceV2Service(glance_common.UnifiedGlanceMixin, image.Image): - """Compatibility layer for Glance V2.""" - - @staticmethod - def _check_v2_visibility(visibility): - visibility_values = ["public", "private", "shared", "community"] - if visibility and visibility not in visibility_values: - raise image.VisibilityException( - message="Improper visibility value: %s in glance_v2" - % visibility) - - def create_image(self, image_name=None, container_format=None, - image_location=None, disk_format=None, - visibility=None, min_disk=0, - min_ram=0, properties=None): - """Creates new image. - - :param image_name: Image name for which need to be created - :param container_format: Container format - :param image_location: The new image's location - :param disk_format: Disk format - :param visibility: The access permission for the created image. - :param min_disk: The min disk of created images - :param min_ram: The min ram of created images - :param properties: Dict of image properties - """ - image_obj = self._impl.create_image( - image_name=image_name, - container_format=container_format, - image_location=image_location, - disk_format=disk_format, - visibility=visibility, - min_disk=min_disk, - min_ram=min_ram, - properties=properties) - return self._unify_image(image_obj) - - def update_image(self, image_id, image_name=None, min_disk=0, - min_ram=0, remove_props=None): - """Update image. - - :param image_id: ID of image to update - :param image_name: Image name to be updated to - :param min_disk: The min disk of updated image - :param min_ram: The min ram of updated image - :param remove_props: List of property names to remove - """ - image_obj = self._impl.update_image( - image_id=image_id, - image_name=image_name, - min_disk=min_disk, - min_ram=min_ram, - remove_props=remove_props) - return self._unify_image(image_obj) - - def list_images(self, status="active", visibility=None, owner=None): - """List images. - - :param status: Filter in images for the specified status - :param visibility: Filter in images for the specified visibility - :param owner: Filter in images for tenant ID - """ - self._check_v2_visibility(visibility) - - images = self._impl.list_images( - status=status, visibility=visibility, owner=owner) - return [self._unify_image(i) for i in images] - - def set_visibility(self, image_id, visibility="shared"): - """Update visibility. 
- - :param image_id: ID of image to update - :param visibility: The visibility of specified image - """ - self._check_v2_visibility(visibility) - - self._impl.set_visibility(image_id=image_id, visibility=visibility) diff --git a/rally/plugins/openstack/services/image/image.py b/rally/plugins/openstack/services/image/image.py deleted file mode 100644 index 1efbf36c8c..0000000000 --- a/rally/plugins/openstack/services/image/image.py +++ /dev/null @@ -1,133 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import cfg -from rally import exceptions -from rally.task import service - - -CONF = cfg.CONF - -UnifiedImage = service.make_resource_cls( - "Image", properties=["id", "name", "visibility", "status"]) - - -class VisibilityException(exceptions.RallyException): - """Wrong visibility value exception. - - """ - error_code = 250 - - -class RemovePropsException(exceptions.RallyException): - """Remove Props it not supported exception. - - """ - error_code = 251 - - -class Image(service.UnifiedService): - @classmethod - def is_applicable(cls, clients): - cloud_version = str(clients.glance().version).split(".")[0] - return cloud_version == cls._meta_get("impl")._meta_get("version") - - @service.should_be_overridden - def create_image(self, image_name=None, container_format=None, - image_location=None, disk_format=None, - visibility="private", min_disk=0, - min_ram=0, properties=None): - """Creates new image. - - :param image_name: Image name for which need to be created - :param container_format: Container format - :param image_location: The new image's location - :param disk_format: Disk format - :param visibility: The access permission for the created image. - :param min_disk: The min disk of created images - :param min_ram: The min ram of created images - :param properties: Dict of image properties - """ - properties = properties or {} - image = self._impl.create_image( - image_name=image_name, - container_format=container_format, - image_location=image_location, - disk_format=disk_format, - visibility=visibility, - min_disk=min_disk, - min_ram=min_ram, - properties=properties) - return image - - @service.should_be_overridden - def update_image(self, image_id, image_name=None, - min_disk=0, min_ram=0, remove_props=None): - """Update image. - - :param image_id: ID of image to update - :param image_name: Image name to be updated to - :param min_disk: The min disk of updated image - :param min_ram: The min ram of updated image - :param remove_props: List of property names to remove - """ - return self._impl.update_image( - image_id, - image_name=image_name, - min_disk=min_disk, - min_ram=min_ram, - remove_props=remove_props) - - @service.should_be_overridden - def list_images(self, status="active", visibility=None, owner=None): - """List images. 
- - :param status: Filter in images for the specified status - :param visibility: Filter in images for the specified visibility - :param owner: Filter in images for tenant ID - """ - return self._impl.list_images(status=status, - visibility=visibility, - owner=owner) - - @service.should_be_overridden - def set_visibility(self, image_id, visibility="public"): - """Update visibility. - - :param image_id: ID of image to update - :param visibility: The visibility of specified image - """ - self._impl.set_visibility(image_id, visibility=visibility) - - @service.should_be_overridden - def get_image(self, image): - """Get specified image. - - :param image: ID or object with ID of image to obtain. - """ - return self._impl.get_image(image) - - @service.should_be_overridden - def delete_image(self, image_id): - """delete image.""" - self._impl.delete_image(image_id) - - @service.should_be_overridden - def download_image(self, image, do_checksum=True): - """Download data for an image. - - :param image: image object or id to look up - :param do_checksum: Enable/disable checksum validation - :rtype: iterable containing image data or None - """ - return self._impl.download_image(image, do_checksum=do_checksum) diff --git a/rally/plugins/openstack/services/storage/__init__.py b/rally/plugins/openstack/services/storage/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/services/storage/block.py b/rally/plugins/openstack/services/storage/block.py deleted file mode 100644 index 8a74fde089..0000000000 --- a/rally/plugins/openstack/services/storage/block.py +++ /dev/null @@ -1,440 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import cfg -from rally.task import service - - -CONF = cfg.CONF - - -Volume = service.make_resource_cls( - "Volume", properties=["id", "name", "size", "status"]) -VolumeSnapshot = service.make_resource_cls( - "VolumeSnapshot", properties=["id", "name", "volume_id", "status"]) -VolumeBackup = service.make_resource_cls( - "VolumeBackup", properties=["id", "name", "volume_id", "status"]) -VolumeTransfer = service.make_resource_cls( - "VolumeTransfer", properties=["id", "name", "volume_id", "auth_key"]) -VolumeEncryptionType = service.make_resource_cls( - "VolumeEncryptionType", properties=["id", "volume_type_id"]) -QoSSpecs = service.make_resource_cls( - "QoSSpecs", properties=["id", "name", "specs"]) - - -class BlockStorage(service.UnifiedService): - - @service.should_be_overridden - def create_volume(self, size, consistencygroup_id=None, - group_id=None, snapshot_id=None, source_volid=None, - name=None, description=None, - volume_type=None, user_id=None, - project_id=None, availability_zone=None, - metadata=None, imageRef=None, scheduler_hints=None, - source_replica=None, multiattach=False): - """Creates a volume. 
- - :param size: Size of volume in GB - :param consistencygroup_id: ID of the consistencygroup - :param group_id: ID of the group - :param snapshot_id: ID of the snapshot - :param name: Name of the volume - :param description: Description of the volume - :param volume_type: Type of volume - :param user_id: User id derived from context - :param project_id: Project id derived from context - :param availability_zone: Availability Zone to use - :param metadata: Optional metadata to set on volume creation - :param imageRef: reference to an image stored in glance - :param source_volid: ID of source volume to clone from - :param source_replica: ID of source volume to clone replica - :param scheduler_hints: (optional extension) arbitrary key-value pairs - specified by the client to help boot an instance - :param multiattach: Allow the volume to be attached to more than - one instance - - :returns: Return a new volume. - """ - return self._impl.create_volume( - size, consistencygroup_id=consistencygroup_id, group_id=group_id, - snapshot_id=snapshot_id, source_volid=source_volid, - name=name, description=description, volume_type=volume_type, - user_id=user_id, project_id=project_id, - availability_zone=availability_zone, metadata=metadata, - imageRef=imageRef, scheduler_hints=scheduler_hints, - source_replica=source_replica, multiattach=multiattach) - - @service.should_be_overridden - def list_volumes(self, detailed=True): - """Lists all volumes. - - :param detailed: Whether to return detailed volume info. - :returns: Return volumes list. - """ - return self._impl.list_volumes(detailed=detailed) - - @service.should_be_overridden - def get_volume(self, volume_id): - """Get a volume. - - :param volume_id: The ID of the volume to get. - - :returns: Return the volume. - """ - return self._impl.get_volume(volume_id) - - @service.should_be_overridden - def update_volume(self, volume_id, - name=None, description=None): - """Update the name or description for a volume. - - :param volume_id: The updated volume id. - :param name: The volume name. - :param description: The volume description. - - :returns: The updated volume. - """ - return self._impl.update_volume( - volume_id, name=name, description=description) - - @service.should_be_overridden - def delete_volume(self, volume): - """Delete a volume.""" - self._impl.delete_volume(volume) - - @service.should_be_overridden - def extend_volume(self, volume, new_size): - """Extend the size of the specified volume.""" - return self._impl.extend_volume(volume, new_size=new_size) - - @service.should_be_overridden - def list_snapshots(self, detailed=True): - """Get a list of all snapshots.""" - return self._impl.list_snapshots(detailed=detailed) - - @service.should_be_overridden - def list_types(self, search_opts=None, is_public=None): - """Lists all volume types.""" - return self._impl.list_types(search_opts=search_opts, - is_public=is_public) - - @service.should_be_overridden - def set_metadata(self, volume, sets=10, set_size=3): - """Update/Set a volume metadata. - - :param volume: The updated/setted volume. - :param sets: how many operations to perform - :param set_size: number of metadata keys to set in each operation - :returns: A list of keys that were set - """ - return self._impl.set_metadata(volume, sets=sets, set_size=set_size) - - @service.should_be_overridden - def delete_metadata(self, volume, keys, deletes=10, delete_size=3): - """Delete volume metadata keys. - - Note that ``len(keys)`` must be greater than or equal to - ``deletes * delete_size``. 
- - :param volume: The volume to delete metadata from - :param deletes: how many operations to perform - :param delete_size: number of metadata keys to delete in each operation - :param keys: a list of keys to choose deletion candidates from - """ - self._impl.delete_metadata(volume, keys, deletes=deletes, - delete_size=delete_size) - - @service.should_be_overridden - def update_readonly_flag(self, volume, read_only): - """Update the read-only access mode flag of the specified volume. - - :param volume: The UUID of the volume to update. - :param read_only: The value to indicate whether to update volume to - read-only access mode. - :returns: A tuple of http Response and body - """ - return self._impl.update_readonly_flag(volume, read_only=read_only) - - @service.should_be_overridden - def upload_volume_to_image(self, volume, force=False, - container_format="bare", disk_format="raw"): - """Upload the given volume to image. - - Returns created image. - - :param volume: volume object - :param force: flag to indicate whether to snapshot a volume even if - it's attached to an instance - :param container_format: container format of image. Acceptable - formats: ami, ari, aki, bare, and ovf - :param disk_format: disk format of image. Acceptable formats: - ami, ari, aki, vhd, vmdk, raw, qcow2, vdi and iso - :returns: Returns created image object - """ - return self._impl.upload_volume_to_image( - volume, force=force, container_format=container_format, - disk_format=disk_format) - - @service.should_be_overridden - def create_qos(self, specs): - """Create a qos specs. - - :param specs: A dict of key/value pairs to be set - :rtype: :class:'QoSSpecs' - """ - return self._impl.create_qos(specs) - - @service.should_be_overridden - def list_qos(self, search_opts=None): - """Get a list of all qos specs. - - :param search_opts: search options - :rtype: list of :class: 'QoSpecs' - """ - return self._impl.list_qos(search_opts) - - @service.should_be_overridden - def get_qos(self, qos_id): - """Get a specific qos specs. - - :param qos_id: The ID of the :class:`QoSSpecs` to get. - :rtype: :class:`QoSSpecs` - """ - return self._impl.get_qos(qos_id) - - @service.should_be_overridden - def set_qos(self, qos, set_specs_args): - """Add/Update keys in qos specs. - - :param qos: The instance of the :class:`QoSSpecs` to set - :param set_specs_args: A dict of key/value pairs to be set - :rtype: :class:`QoSSpecs` - """ - return self._impl.set_qos(qos=qos, - set_specs_args=set_specs_args) - - @service.should_be_overridden - def qos_associate_type(self, qos_specs, volume_type): - """Associate qos specs from volume type. - - :param qos_specs: The qos specs to be associated with - :param volume_type: The volume type id to be associated with - :rtype: :class:`QoSSpecs` - """ - return self._impl.qos_associate_type(qos_specs, volume_type) - - @service.should_be_overridden - def qos_disassociate_type(self, qos_specs, volume_type): - """Disassociate qos specs from volume type. - - :param qos_specs: The qos specs to be associated with - :param volume_type: The volume type id to be disassociated with - :rtype: :class:`QoSSpecs` - """ - return self._impl.qos_disassociate_type(qos_specs, volume_type) - - @service.should_be_overridden - def create_snapshot(self, volume_id, force=False, - name=None, description=None, metadata=None): - """Create one snapshot. - - Returns when the snapshot is actually created and is in the "Available" - state. 
- - :param volume_id: volume uuid for creating snapshot - :param force: If force is True, create a snapshot even if the volume is - attached to an instance. Default is False. - :param name: Name of the snapshot - :param description: Description of the snapshot - :param metadata: Metadata of the snapshot - :returns: Created snapshot object - """ - return self._impl.create_snapshot( - volume_id, force=force, name=name, - description=description, metadata=metadata) - - @service.should_be_overridden - def delete_snapshot(self, snapshot): - """Delete the given snapshot. - - Returns when the snapshot is actually deleted. - - :param snapshot: snapshot instance - """ - self._impl.delete_snapshot(snapshot) - - @service.should_be_overridden - def create_backup(self, volume_id, container=None, - name=None, description=None, - incremental=False, force=False, - snapshot_id=None): - """Creates a volume backup. - - :param volume_id: The ID of the volume to backup. - :param container: The name of the backup service container. - :param name: The name of the backup. - :param description: The description of the backup. - :param incremental: Incremental backup. - :param force: If True, allows an in-use volume to be backed up. - :param snapshot_id: The ID of the snapshot to backup. - - :returns: The created backup object. - """ - return self._impl.create_backup(volume_id, container=container, - name=name, description=description, - incremental=incremental, force=force, - snapshot_id=snapshot_id) - - @service.should_be_overridden - def delete_backup(self, backup): - """Delete a volume backup.""" - self._impl.delete_backup(backup) - - @service.should_be_overridden - def restore_backup(self, backup_id, volume_id=None): - """Restore the given backup. - - :param backup_id: The ID of the backup to restore. - :param volume_id: The ID of the volume to restore the backup to. - - :returns: Return the restored backup. - """ - return self._impl.restore_backup(backup_id, volume_id=volume_id) - - @service.should_be_overridden - def list_backups(self, detailed=True): - """Return user volume backups list.""" - return self._impl.list_backups(detailed=detailed) - - @service.should_be_overridden - def list_transfers(self, detailed=True, search_opts=None): - """Get a list of all volume transfers. - - :param detailed: If True, detailed information about transfer - should be listed - :param search_opts: Search options to filter out volume transfers - :returns: list of :class:`VolumeTransfer` - """ - return self._impl.list_transfers(detailed=detailed, - search_opts=search_opts) - - @service.should_be_overridden - def create_volume_type(self, name=None, description=None, is_public=True): - """Creates a volume type. - - :param name: Descriptive name of the volume type - :param description: Description of the volume type - :param is_public: Volume type visibility - :returns: Return the created volume type. - """ - return self._impl.create_volume_type(name=name, - description=description, - is_public=is_public) - - @service.should_be_overridden - def get_volume_type(self, volume_type): - """get details of volume_type. - - :param volume_type: The ID of the :class:`VolumeType` to get - :returns: :class:`VolumeType` - """ - return self._impl.get_volume_type(volume_type) - - @service.should_be_overridden - def delete_volume_type(self, volume_type): - """delete a volume type. 
- - :param volume_type: Name or Id of the volume type - :returns: base on client response return True if the request - has been accepted or not - """ - return self._impl.delete_volume_type(volume_type) - - @service.should_be_overridden - def set_volume_type_keys(self, volume_type, metadata): - """Set extra specs on a volume type. - - :param volume_type: The :class:`VolumeType` to set extra spec on - :param metadata: A dict of key/value pairs to be set - :returns: extra_specs if the request has been accepted - """ - return self._impl.set_volume_type_keys(volume_type, metadata) - - @service.should_be_overridden - def transfer_create(self, volume_id, name=None): - """Creates a volume transfer. - - :param name: The name of created transfer - :param volume_id: The ID of the volume to transfer. - :returns: Return the created transfer. - """ - return self._impl.transfer_create(volume_id, name=name) - - @service.should_be_overridden - def transfer_accept(self, transfer_id, auth_key): - """Accept a volume transfer. - - :param transfer_id: The ID of the transfer to accept. - :param auth_key: The auth_key of the transfer. - :returns: VolumeTransfer - """ - return self._impl.transfer_accept(transfer_id, auth_key=auth_key) - - @service.should_be_overridden - def create_encryption_type(self, volume_type, specs): - """Create encryption type for a volume type. Default: admin only. - - :param volume_type: the volume type on which to add an encryption type - :param specs: the encryption type specifications to add - :return: an instance of :class: VolumeEncryptionType - """ - return self._impl.create_encryption_type(volume_type, specs=specs) - - @service.should_be_overridden - def get_encryption_type(self, volume_type): - """Get the volume encryption type for the specified volume type. - - :param volume_type: the volume type to query - :return: an instance of :class: VolumeEncryptionType - """ - return self._impl.get_encryption_type(volume_type) - - @service.should_be_overridden - def list_encryption_type(self, search_opts=None): - """List all volume encryption types. - - :param search_opts: Options used when search for encryption types - :return: a list of :class: VolumeEncryptionType instances - """ - return self._impl.list_encryption_type(search_opts=search_opts) - - @service.should_be_overridden - def delete_encryption_type(self, volume_type): - """Delete the encryption type information for the specified volume type. - - :param volume_type: the volume type whose encryption type information - must be deleted - """ - self._impl.delete_encryption_type(volume_type) - - @service.should_be_overridden - def update_encryption_type(self, volume_type, specs): - """Update the encryption type information for the specified volume type. - - :param volume_type: the volume type whose encryption type information - will be updated - :param specs: the encryption type specifications to update - :return: an instance of :class: VolumeEncryptionType - """ - return self._impl.update_encryption_type(volume_type, specs=specs) diff --git a/rally/plugins/openstack/services/storage/cinder_common.py b/rally/plugins/openstack/services/storage/cinder_common.py deleted file mode 100644 index 05273fa1cc..0000000000 --- a/rally/plugins/openstack/services/storage/cinder_common.py +++ /dev/null @@ -1,728 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import random - -from rally import exceptions -from rally.plugins.openstack.services.image import image -from rally.plugins.openstack.services.storage import block -from rally.task import atomic -from rally.task import utils as bench_utils - -CONF = block.CONF - - -class CinderMixin(object): - - def _get_client(self): - return self._clients.cinder(self.version) - - def _update_resource(self, resource): - try: - manager = getattr(resource, "manager", None) - if manager: - res = manager.get(resource.id) - else: - if isinstance(resource, block.Volume): - attr = "volumes" - elif isinstance(resource, block.VolumeSnapshot): - attr = "volume_snapshots" - elif isinstance(resource, block.VolumeBackup): - attr = "backups" - res = getattr(self._get_client(), attr).get(resource.id) - except Exception as e: - if getattr(e, "code", getattr(e, "http_status", 400)) == 404: - raise exceptions.GetResourceNotFound(resource=resource) - raise exceptions.GetResourceFailure(resource=resource, err=e) - return res - - def _wait_available_volume(self, volume): - return bench_utils.wait_for_status( - volume, - ready_statuses=["available"], - update_resource=self._update_resource, - timeout=CONF.openstack.cinder_volume_create_timeout, - check_interval=CONF.openstack.cinder_volume_create_poll_interval - ) - - def list_volumes(self, detailed=True): - """List all volumes.""" - aname = "cinder_v%s.list_volumes" % self.version - with atomic.ActionTimer(self, aname): - return self._get_client().volumes.list(detailed) - - def get_volume(self, volume_id): - """Get target volume information.""" - aname = "cinder_v%s.get_volume" % self.version - with atomic.ActionTimer(self, aname): - return self._get_client().volumes.get(volume_id) - - def delete_volume(self, volume): - """Delete target volume.""" - aname = "cinder_v%s.delete_volume" % self.version - with atomic.ActionTimer(self, aname): - self._get_client().volumes.delete(volume) - bench_utils.wait_for_status( - volume, - ready_statuses=["deleted"], - check_deletion=True, - update_resource=self._update_resource, - timeout=CONF.openstack.cinder_volume_delete_timeout, - check_interval=(CONF.openstack - .cinder_volume_delete_poll_interval) - ) - - def extend_volume(self, volume, new_size): - """Extend the size of the specified volume.""" - if isinstance(new_size, dict): - new_size = random.randint(new_size["min"], new_size["max"]) - - aname = "cinder_v%s.extend_volume" % self.version - with atomic.ActionTimer(self, aname): - self._get_client().volumes.extend(volume, new_size) - return self._wait_available_volume(volume) - - def list_snapshots(self, detailed=True): - """Get a list of all snapshots.""" - aname = "cinder_v%s.list_snapshots" % self.version - with atomic.ActionTimer(self, aname): - return (self._get_client() - .volume_snapshots.list(detailed)) - - def set_metadata(self, volume, sets=10, set_size=3): - """Set volume metadata. 
- - :param volume: The volume to set metadata on - :param sets: how many operations to perform - :param set_size: number of metadata keys to set in each operation - :returns: A list of keys that were set - """ - key = "cinder_v%s.set_%s_metadatas_%s_times" % (self.version, - set_size, - sets) - with atomic.ActionTimer(self, key): - keys = [] - for i in range(sets): - metadata = {} - for j in range(set_size): - key = self.generate_random_name() - keys.append(key) - metadata[key] = self.generate_random_name() - - self._get_client().volumes.set_metadata(volume, metadata) - return keys - - def delete_metadata(self, volume, keys, deletes=10, delete_size=3): - """Delete volume metadata keys. - - Note that ``len(keys)`` must be greater than or equal to - ``deletes * delete_size``. - - :param volume: The volume to delete metadata from - :param deletes: how many operations to perform - :param delete_size: number of metadata keys to delete in each operation - :param keys: a list of keys to choose deletion candidates from - """ - if len(keys) < deletes * delete_size: - raise exceptions.InvalidArgumentsException( - "Not enough metadata keys to delete: " - "%(num_keys)s keys, but asked to delete %(num_deletes)s" % - {"num_keys": len(keys), - "num_deletes": deletes * delete_size}) - # make a shallow copy of the list of keys so that, when we pop - # from it later, we don't modify the original list. - keys = list(keys) - random.shuffle(keys) - action_name = ("cinder_v%s.delete_%s_metadatas_%s_times" - % (self.version, delete_size, deletes)) - with atomic.ActionTimer(self, action_name): - for i in range(deletes): - to_del = keys[i * delete_size:(i + 1) * delete_size] - self._get_client().volumes.delete_metadata(volume, to_del) - - def update_readonly_flag(self, volume, read_only): - """Update the read-only access mode flag of the specified volume. - - :param volume: The UUID of the volume to update. - :param read_only: The value to indicate whether to update volume to - read-only access mode. - :returns: A tuple of http Response and body - """ - aname = "cinder_v%s.update_readonly_flag" % self.version - with atomic.ActionTimer(self, aname): - return self._get_client().volumes.update_readonly_flag( - volume, read_only) - - def upload_volume_to_image(self, volume, force=False, - container_format="bare", disk_format="raw"): - """Upload the given volume to image. - - Returns created image. - - :param volume: volume object - :param force: flag to indicate whether to snapshot a volume even if - it's attached to an instance - :param container_format: container format of image. Acceptable - formats: ami, ari, aki, bare, and ovf - :param disk_format: disk format of image. Acceptable formats: - ami, ari, aki, vhd, vmdk, raw, qcow2, vdi and iso - :returns: Returns created image object - """ - aname = "cinder_v%s.upload_volume_to_image" % self.version - with atomic.ActionTimer(self, aname): - resp, img = self._get_client().volumes.upload_to_image( - volume, force, self.generate_random_name(), container_format, - disk_format) - # NOTE (e0ne): upload_to_image changes volume status to uploading - # so we need to wait until it will be available. 
- volume = self._wait_available_volume(volume) - - image_id = img["os-volume_upload_image"]["image_id"] - glance = image.Image(self._clients) - - image_inst = glance.get_image(image_id) - image_inst = bench_utils.wait_for_status( - image_inst, - ready_statuses=["active"], - update_resource=glance.get_image, - timeout=CONF.openstack.glance_image_create_timeout, - check_interval=(CONF.openstack - .glance_image_create_poll_interval) - ) - - return image_inst - - def create_qos(self, specs): - """Create a qos specs. - - :param specs: A dict of key/value pairs to be set - :rtype: :class:'QoSSpecs' - """ - aname = "cinder_v%s.create_qos" % self.version - name = self.generate_random_name() - - with atomic.ActionTimer(self, aname): - return self._get_client().qos_specs.create(name, specs) - - def list_qos(self, search_opts=None): - """Get a list of all qos specs. - - :param search_opts: search options - :rtype: list of :class: 'QoSpecs' - """ - aname = "cinder_v%s.list_qos" % self.version - with atomic.ActionTimer(self, aname): - return self._get_client().qos_specs.list(search_opts) - - def get_qos(self, qos_id): - """Get a specific qos specs. - - :param qos_id: The ID of the :class: 'QoSSpecs' to get - :rtype: :class: 'QoSSpecs' - """ - aname = "cinder_v%s.get_qos" % self.version - with atomic.ActionTimer(self, aname): - return self._get_client().qos_specs.get(qos_id) - - def set_qos(self, qos_id, set_specs_args): - """Add/Update keys in qos specs. - - :param qos_id: The ID of the :class:`QoSSpecs` to get - :param set_specs_args: A dict of key/value pairs to be set - :rtype: class 'cinderclient.apiclient.base.DictWithMeta' - {"qos_specs": set_specs_args} - """ - aname = "cinder_v%s.set_qos" % self.version - with atomic.ActionTimer(self, aname): - return self._get_client().qos_specs.set_keys(qos_id, - set_specs_args) - - def qos_associate_type(self, qos_specs, vol_type_id): - """Associate qos specs from volume type. - - :param qos_specs: The qos specs to be associated with - :param vol_type_id: The volume type id to be associated with - :returns: base on client response return True if the request - has been accepted or not - """ - aname = "cinder_v%s.qos_associate_type" % self.version - with atomic.ActionTimer(self, aname): - tuple_res = self._get_client().qos_specs.associate(qos_specs, - vol_type_id) - return (tuple_res[0].status_code == 202) - - def qos_disassociate_type(self, qos_specs, vol_type_id): - """Disassociate qos specs from volume type. - - :param qos_specs: The qos specs to be disassociated with - :param vol_type_id: The volume type id to be disassociated with - :returns: base on client response return True if the request - has been accepted or not - """ - aname = "cinder_v%s.qos_disassociate_type" % self.version - with atomic.ActionTimer(self, aname): - tuple_res = self._get_client().qos_specs.disassociate(qos_specs, - vol_type_id) - return (tuple_res[0].status_code == 202) - - def delete_snapshot(self, snapshot): - """Delete the given snapshot. - - Returns when the snapshot is actually deleted. 
- - :param snapshot: snapshot object - """ - aname = "cinder_v%s.delete_snapshot" % self.version - with atomic.ActionTimer(self, aname): - self._get_client().volume_snapshots.delete(snapshot) - bench_utils.wait_for_status( - snapshot, - ready_statuses=["deleted"], - check_deletion=True, - update_resource=self._update_resource, - timeout=CONF.openstack.cinder_volume_delete_timeout, - check_interval=(CONF.openstack - .cinder_volume_delete_poll_interval) - ) - - def delete_backup(self, backup): - """Delete the given backup. - - Returns when the backup is actually deleted. - - :param backup: backup instance - """ - aname = "cinder_v%s.delete_backup" % self.version - with atomic.ActionTimer(self, aname): - self._get_client().backups.delete(backup) - bench_utils.wait_for_status( - backup, - ready_statuses=["deleted"], - check_deletion=True, - update_resource=self._update_resource, - timeout=CONF.openstack.cinder_volume_delete_timeout, - check_interval=(CONF.openstack - .cinder_volume_delete_poll_interval) - ) - - def restore_backup(self, backup_id, volume_id=None): - """Restore the given backup. - - :param backup_id: The ID of the backup to restore. - :param volume_id: The ID of the volume to restore the backup to. - """ - aname = "cinder_v%s.restore_backup" % self.version - with atomic.ActionTimer(self, aname): - restore = self._get_client().restores.restore(backup_id, volume_id) - restored_volume = self._get_client().volumes.get(restore.volume_id) - return self._wait_available_volume(restored_volume) - - def list_backups(self, detailed=True): - """Return user volume backups list. - - :param detailed: True if detailed information about backup - should be listed - """ - aname = "cinder_v%s.list_backups" % self.version - with atomic.ActionTimer(self, aname): - return self._get_client().backups.list(detailed) - - def list_transfers(self, detailed=True, search_opts=None): - """Get a list of all volume transfers. - - :param detailed: If True, detailed information about transfer - should be listed - :param search_opts: Search options to filter out volume transfers - :returns: list of :class:`VolumeTransfer` - """ - aname = "cinder_v%s.list_transfers" % self.version - with atomic.ActionTimer(self, aname): - return self._get_client().transfers.list(detailed, search_opts) - - def get_volume_type(self, volume_type): - """get details of volume_type. - - :param volume_type: The ID of the :class:`VolumeType` to get - :returns: :class:`VolumeType` - """ - aname = "cinder_v%s.get_volume_type" % self.version - with atomic.ActionTimer(self, aname): - return self._get_client().volume_types.get(volume_type) - - def delete_volume_type(self, volume_type): - """delete a volume type. - - :param volume_type: Name or Id of the volume type - :returns: base on client response return True if the request - has been accepted or not - """ - aname = "cinder_v%s.delete_volume_type" % self.version - with atomic.ActionTimer(self, aname): - tuple_res = self._get_client().volume_types.delete( - volume_type) - return (tuple_res[0].status_code == 202) - - def set_volume_type_keys(self, volume_type, metadata): - """Set extra specs on a volume type. 
- - :param volume_type: The :class:`VolumeType` to set extra spec on - :param metadata: A dict of key/value pairs to be set - :returns: extra_specs if the request has been accepted - """ - aname = "cinder_v%s.set_volume_type_keys" % self.version - with atomic.ActionTimer(self, aname): - return volume_type.set_keys(metadata) - - def transfer_create(self, volume_id, name=None): - """Create a volume transfer. - - :param name: The name of created transfer - :param volume_id: The ID of the volume to transfer - :rtype: VolumeTransfer - """ - name = name or self.generate_random_name() - aname = "cinder_v%s.transfer_create" % self.version - with atomic.ActionTimer(self, aname): - return self._get_client().transfers.create(volume_id, name=name) - - def transfer_accept(self, transfer_id, auth_key): - """Accept a volume transfer. - - :param transfer_id: The ID of the transfer to accept. - :param auth_key: The auth_key of the transfer. - :rtype: VolumeTransfer - """ - aname = "cinder_v%s.transfer_accept" % self.version - with atomic.ActionTimer(self, aname): - return self._get_client().transfers.accept(transfer_id, auth_key) - - def create_encryption_type(self, volume_type, specs): - """Create encryption type for a volume type. Default: admin only. - - :param volume_type: the volume type on which to add an encryption type - :param specs: the encryption type specifications to add - :return: an instance of :class: VolumeEncryptionType - """ - aname = "cinder_v%s.create_encryption_type" % self.version - with atomic.ActionTimer(self, aname): - return self._get_client().volume_encryption_types.create( - volume_type, specs) - - def get_encryption_type(self, volume_type): - """Get the volume encryption type for the specified volume type. - - :param volume_type: the volume type to query - :return: an instance of :class: VolumeEncryptionType - """ - aname = "cinder_v%s.get_encryption_type" % self.version - with atomic.ActionTimer(self, aname): - return self._get_client().volume_encryption_types.get( - volume_type) - - def list_encryption_type(self, search_opts=None): - """List all volume encryption types. - - :param search_opts: Options used when search for encryption types - :return: a list of :class: VolumeEncryptionType instances - """ - aname = "cinder_v%s.list_encryption_type" % self.version - with atomic.ActionTimer(self, aname): - return self._get_client().volume_encryption_types.list( - search_opts) - - def delete_encryption_type(self, volume_type): - """Delete the encryption type information for the specified volume type. - - :param volume_type: the volume type whose encryption type information - must be deleted - """ - aname = "cinder_v%s.delete_encryption_type" % self.version - with atomic.ActionTimer(self, aname): - resp = self._get_client().volume_encryption_types.delete( - volume_type) - if (resp[0].status_code != 202): - raise exceptions.RallyException( - "EncryptionType Deletion Failed") - - def update_encryption_type(self, volume_type, specs): - """Update the encryption type information for the specified volume type. 
- - :param volume_type: the volume type whose encryption type information - must be updated - :param specs: the encryption type specifications to update - :return: an instance of :class: VolumeEncryptionType - """ - aname = "cinder_v%s.update_encryption_type" % self.version - with atomic.ActionTimer(self, aname): - return self._get_client().volume_encryption_types.update( - volume_type, specs) - - -class UnifiedCinderMixin(object): - - @staticmethod - def _unify_backup(backup): - return block.VolumeBackup(id=backup.id, name=backup.name, - volume_id=backup.volume_id, - status=backup.status) - - @staticmethod - def _unify_transfer(transfer): - auth_key = transfer.auth_key if hasattr(transfer, "auth_key") else None - return block.VolumeTransfer(id=transfer.id, name=transfer.name, - volume_id=transfer.volume_id, - auth_key=auth_key) - - @staticmethod - def _unify_qos(qos): - return block.QoSSpecs(id=qos.id, name=qos.name, specs=qos.specs) - - @staticmethod - def _unify_encryption_type(encryption_type): - return block.VolumeEncryptionType( - id=encryption_type.encryption_id, - volume_type_id=encryption_type.volume_type_id) - - def delete_volume(self, volume): - """Delete a volume.""" - self._impl.delete_volume(volume) - - def set_metadata(self, volume, sets=10, set_size=3): - """Update/Set a volume metadata. - - :param volume: The updated/setted volume. - :param sets: how many operations to perform - :param set_size: number of metadata keys to set in each operation - :returns: A list of keys that were set - """ - return self._impl.set_metadata(volume, sets=sets, set_size=set_size) - - def delete_metadata(self, volume, keys, deletes=10, delete_size=3): - """Delete volume metadata keys. - - Note that ``len(keys)`` must be greater than or equal to - ``deletes * delete_size``. - - :param volume: The volume to delete metadata from - :param deletes: how many operations to perform - :param delete_size: number of metadata keys to delete in each operation - :param keys: a list of keys to choose deletion candidates from - """ - self._impl.delete_metadata(volume, keys=keys, deletes=10, - delete_size=3) - - def update_readonly_flag(self, volume, read_only): - """Update the read-only access mode flag of the specified volume. - - :param volume: The UUID of the volume to update. - :param read_only: The value to indicate whether to update volume to - read-only access mode. - :returns: A tuple of http Response and body - """ - return self._impl.update_readonly_flag(volume, read_only=read_only) - - def upload_volume_to_image(self, volume, force=False, - container_format="bare", disk_format="raw"): - """Upload the given volume to image. - - Returns created image. - - :param volume: volume object - :param force: flag to indicate whether to snapshot a volume even if - it's attached to an instance - :param container_format: container format of image. Acceptable - formats: ami, ari, aki, bare, and ovf - :param disk_format: disk format of image. Acceptable formats: - ami, ari, aki, vhd, vmdk, raw, qcow2, vdi and iso - :returns: Returns created image object - """ - return self._impl.upload_volume_to_image( - volume, force=force, container_format=container_format, - disk_format=disk_format) - - def create_qos(self, specs): - """Create a qos specs. - - :param specs: A dict of key/value pairs to be set - :rtype: :class:'QoSSpecs' - """ - return self._unify_qos(self._impl.create_qos(specs)) - - def list_qos(self, search_opts=None): - """Get a list of all qos specs. 
- - :param search_opts: search options - :rtype: list of :class: 'QoSpecs' - """ - return [self._unify_qos(qos) - for qos in self._impl.list_qos(search_opts)] - - def get_qos(self, qos_id): - """Get a specific qos specs. - - :param qos_id: The ID of the :class: 'QoSSpecs' to get - :rtype: :class: 'QoSSpecs' - """ - return self._unify_qos(self._impl.get_qos(qos_id)) - - def set_qos(self, qos, set_specs_args): - """Add/Update keys in qos specs. - - :param qos: The instance of the :class:`QoSSpecs` to set - :param set_specs_args: A dict of key/value pairs to be set - :rtype: :class: 'QoSSpecs' - """ - self._impl.set_qos(qos.id, set_specs_args) - return self._unify_qos(qos) - - def qos_associate_type(self, qos_specs, vol_type_id): - """Associate qos specs from volume type. - - :param qos_specs: The qos specs to be associated with - :param vol_type_id: The volume type id to be associated with - """ - self._impl.qos_associate_type(qos_specs, vol_type_id) - return self._unify_qos(qos_specs) - - def qos_disassociate_type(self, qos_specs, vol_type_id): - """Disassociate qos specs from volume type. - - :param qos_specs: The qos specs to be disassociated with - :param vol_type_id: The volume type id to be disassociated with - """ - self._impl.qos_disassociate_type(qos_specs, vol_type_id) - return self._unify_qos(qos_specs) - - def delete_snapshot(self, snapshot): - """Delete the given backup. - - Returns when the backup is actually deleted. - - :param backup: backup instance - """ - self._impl.delete_snapshot(snapshot) - - def delete_backup(self, backup): - """Delete a volume backup.""" - self._impl.delete_backup(backup) - - def list_backups(self, detailed=True): - """Return user volume backups list.""" - return [self._unify_backup(backup) - for backup in self._impl.list_backups(detailed=detailed)] - - def list_transfers(self, detailed=True, search_opts=None): - """Get a list of all volume transfers. - - :param detailed: If True, detailed information about transfer - should be listed - :param search_opts: Search options to filter out volume transfers - :returns: list of :class:`VolumeTransfer` - """ - return [self._unify_transfer(transfer) - for transfer in self._impl.list_transfers( - detailed=detailed, search_opts=search_opts)] - - def get_volume_type(self, volume_type): - """get details of volume_type. - - :param volume_type: The ID of the :class:`VolumeType` to get - :returns: :class:`VolumeType` - """ - return self._impl.get_volume_type(volume_type) - - def delete_volume_type(self, volume_type): - """delete a volume type. - - :param volume_type: Name or Id of the volume type - :returns: base on client response return True if the request - has been accepted or not - """ - return self._impl.delete_volume_type(volume_type) - - def set_volume_type_keys(self, volume_type, metadata): - """Set extra specs on a volume type. - - :param volume_type: The :class:`VolumeType` to set extra spec on - :param metadata: A dict of key/value pairs to be set - :returns: extra_specs if the request has been accepted - """ - return self._impl.set_volume_type_keys(volume_type, metadata) - - def transfer_create(self, volume_id, name=None): - """Creates a volume transfer. - - :param name: The name of created transfer - :param volume_id: The ID of the volume to transfer. - :returns: Return the created transfer. - """ - return self._unify_transfer( - self._impl.transfer_create(volume_id, name=name)) - - def transfer_accept(self, transfer_id, auth_key): - """Accept a volume transfer. 
- - :param transfer_id: The ID of the transfer to accept. - :param auth_key: The auth_key of the transfer. - :returns: VolumeTransfer - """ - return self._unify_transfer( - self._impl.transfer_accept(transfer_id, auth_key=auth_key)) - - def create_encryption_type(self, volume_type, specs): - """Create encryption type for a volume type. Default: admin only. - - :param volume_type: the volume type on which to add an encryption type - :param specs: the encryption type specifications to add - :return: an instance of :class: VolumeEncryptionType - """ - return self._unify_encryption_type( - self._impl.create_encryption_type(volume_type, specs=specs)) - - def get_encryption_type(self, volume_type): - """Get the volume encryption type for the specified volume type. - - :param volume_type: the volume type to query - :return: an instance of :class: VolumeEncryptionType - """ - return self._unify_encryption_type( - self._impl.get_encryption_type(volume_type)) - - def list_encryption_type(self, search_opts=None): - """List all volume encryption types. - - :param search_opts: Options used when search for encryption types - :return: a list of :class: VolumeEncryptionType instances - """ - return [self._unify_encryption_type(encryption_type) - for encryption_type in self._impl.list_encryption_type( - search_opts=search_opts)] - - def delete_encryption_type(self, volume_type): - """Delete the encryption type information for the specified volume type. - - :param volume_type: the volume type whose encryption type information - must be deleted - """ - return self._impl.delete_encryption_type(volume_type) - - def update_encryption_type(self, volume_type, specs): - """Update the encryption type information for the specified volume type. - - :param volume_type: the volume type whose encryption type information - must be updated - :param specs: the encryption type specifications to update - :return: an instance of :class: VolumeEncryptionType - """ - return self._impl.update_encryption_type(volume_type, specs=specs) diff --git a/rally/plugins/openstack/services/storage/cinder_v1.py b/rally/plugins/openstack/services/storage/cinder_v1.py deleted file mode 100644 index d97fa6b14a..0000000000 --- a/rally/plugins/openstack/services/storage/cinder_v1.py +++ /dev/null @@ -1,314 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
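The _unify_*() helpers above are what let scenario code stay agnostic of the Cinder API version: whatever the versioned client returns is mapped onto the plain records defined in block.py. A minimal, self-contained sketch of that idea follows; the Volume namedtuple and unify_volume() below are editor-invented stand-ins for block.Volume and the versioned _unify_volume() methods, not Rally code.

import collections

Volume = collections.namedtuple("Volume", ["id", "name", "size", "status"])

def unify_volume(volume):
    # Cinder v1 exposes ``display_name`` while v2 exposes ``name``; the
    # unified layer hides that difference behind one record type.
    if isinstance(volume, dict):
        name = volume.get("name", volume.get("display_name"))
        return Volume(volume["id"], name, volume["size"], volume["status"])
    name = getattr(volume, "name", None) or getattr(volume, "display_name", None)
    return Volume(volume.id, name, volume.size, volume.status)

v1_style = {"id": "42", "display_name": "vol-a", "size": 1, "status": "available"}
v2_style = {"id": "43", "name": "vol-b", "size": 1, "status": "available"}
assert unify_volume(v1_style).name == "vol-a"
assert unify_volume(v2_style).name == "vol-b"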
- -import random - -from rally.common import utils as rutils -from rally.plugins.openstack import service -from rally.plugins.openstack.services.storage import block -from rally.plugins.openstack.services.storage import cinder_common -from rally.task import atomic - -CONF = block.CONF - - -@service.service("cinder", service_type="block-storage", version="1") -class CinderV1Service(service.Service, cinder_common.CinderMixin): - - @atomic.action_timer("cinder_v1.create_volume") - def create_volume(self, size, snapshot_id=None, source_volid=None, - display_name=None, display_description=None, - volume_type=None, user_id=None, - project_id=None, availability_zone=None, - metadata=None, imageRef=None): - """Creates a volume. - - :param size: Size of volume in GB - :param snapshot_id: ID of the snapshot - :param display_name: Name of the volume - :param display_description: Description of the volume - :param volume_type: Type of volume - :param user_id: User id derived from context - :param project_id: Project id derived from context - :param availability_zone: Availability Zone to use - :param metadata: Optional metadata to set on volume creation - :param imageRef: reference to an image stored in glance - - :returns: Return a new volume. - """ - if isinstance(size, dict): - size = random.randint(size["min"], size["max"]) - - volume = self._get_client().volumes.create( - size, - display_name=(display_name or self.generate_random_name()), - display_description=display_description, - snapshot_id=snapshot_id, - source_volid=source_volid, - volume_type=volume_type, - user_id=user_id, - project_id=project_id, - availability_zone=availability_zone, - metadata=metadata, - imageRef=imageRef - ) - - # NOTE(msdubov): It is reasonable to wait 5 secs before starting to - # check whether the volume is ready => less API calls. - rutils.interruptable_sleep( - CONF.openstack.cinder_volume_create_prepoll_delay) - - return self._wait_available_volume(volume) - - @atomic.action_timer("cinder_v1.update_volume") - def update_volume(self, volume_id, display_name=None, - display_description=None): - """Update the name or description for a volume. - - :param volume_id: The updated volume id. - :param display_name: The volume name. - :param display_description: The volume description. - - :returns: The updated volume. - """ - kwargs = {} - if display_name is not None: - kwargs["display_name"] = display_name - if display_description is not None: - kwargs["display_description"] = display_description - updated_volume = self._get_client().volumes.update( - volume_id, **kwargs) - return updated_volume["volume"] - - @atomic.action_timer("cinder_v1.list_types") - def list_types(self, search_opts=None): - """Lists all volume types.""" - return (self._get_client() - .volume_types.list(search_opts)) - - @atomic.action_timer("cinder_v1.create_snapshot") - def create_snapshot(self, volume_id, force=False, - display_name=None, display_description=None): - """Create one snapshot. - - Returns when the snapshot is actually created and is in the "Available" - state. 
- - :param volume_id: volume uuid for creating snapshot - :param force: flag to indicate whether to snapshot a volume even if - it's attached to an instance - :param display_name: Name of the snapshot - :param display_description: Description of the snapshot - :returns: Created snapshot object - """ - kwargs = {"force": force, - "display_name": display_name or self.generate_random_name(), - "display_description": display_description} - - snapshot = self._get_client().volume_snapshots.create(volume_id, - **kwargs) - rutils.interruptable_sleep( - CONF.openstack.cinder_volume_create_prepoll_delay) - snapshot = self._wait_available_volume(snapshot) - return snapshot - - @atomic.action_timer("cinder_v1.create_backup") - def create_backup(self, volume_id, container=None, - name=None, description=None): - """Create a volume backup of the given volume. - - :param volume_id: The ID of the volume to backup. - :param container: The name of the backup service container. - :param name: The name of the backup. - :param description: The description of the backup. - """ - kwargs = {"name": name or self.generate_random_name(), - "description": description, - "container": container} - backup = self._get_client().backups.create(volume_id, **kwargs) - return self._wait_available_volume(backup) - - @atomic.action_timer("cinder_v1.create_volume_type") - def create_volume_type(self, name=None): - """create volume type. - - :param kwargs: Optional additional arguments for volume type creation - :param name: Descriptive name of the volume type - """ - kwargs = {"name": name or self.generate_random_name()} - return self._get_client().volume_types.create(**kwargs) - - -@service.compat_layer(CinderV1Service) -class UnifiedCinderV1Service(cinder_common.UnifiedCinderMixin, - block.BlockStorage): - - @staticmethod - def _unify_volume(volume): - if isinstance(volume, dict): - return block.Volume(id=volume["id"], name=volume["display_name"], - size=volume["size"], status=volume["status"]) - else: - return block.Volume(id=volume.id, name=volume.display_name, - size=volume.size, status=volume.status) - - @staticmethod - def _unify_snapshot(snapshot): - return block.VolumeSnapshot(id=snapshot.id, name=snapshot.display_name, - volume_id=snapshot.volume_id, - status=snapshot.status) - - def create_volume(self, size, consistencygroup_id=None, - group_id=None, snapshot_id=None, source_volid=None, - name=None, description=None, - volume_type=None, user_id=None, - project_id=None, availability_zone=None, - metadata=None, imageRef=None, scheduler_hints=None, - source_replica=None, multiattach=False): - """Creates a volume. - - :param size: Size of volume in GB - :param consistencygroup_id: ID of the consistencygroup - :param group_id: ID of the group - :param snapshot_id: ID of the snapshot - :param name: Name of the volume - :param description: Description of the volume - :param volume_type: Type of volume - :param user_id: User id derived from context - :param project_id: Project id derived from context - :param availability_zone: Availability Zone to use - :param metadata: Optional metadata to set on volume creation - :param imageRef: reference to an image stored in glance - :param source_volid: ID of source volume to clone from - :param source_replica: ID of source volume to clone replica - :param scheduler_hints: (optional extension) arbitrary key-value pairs - specified by the client to help boot an instance - :param multiattach: Allow the volume to be attached to more than - one instance - - :returns: Return a new volume. 
- """ - return self._unify_volume(self._impl.create_volume( - size, snapshot_id=snapshot_id, source_volid=source_volid, - display_name=name, - display_description=description, - volume_type=volume_type, user_id=user_id, - project_id=project_id, availability_zone=availability_zone, - metadata=metadata, imageRef=imageRef)) - - def list_volumes(self, detailed=True): - """Lists all volumes. - - :param detailed: Whether to return detailed volume info. - :returns: Return volumes list. - """ - return [self._unify_volume(volume) - for volume in self._impl.list_volumes(detailed=detailed)] - - def get_volume(self, volume_id): - """Get a volume. - - :param volume_id: The ID of the volume to get. - - :returns: Return the volume. - """ - return self._unify_volume(self._impl.get_volume(volume_id)) - - def extend_volume(self, volume, new_size): - """Extend the size of the specified volume.""" - return self._unify_volume( - self._impl.extend_volume(volume, new_size=new_size)) - - def update_volume(self, volume_id, - name=None, description=None): - """Update the name or description for a volume. - - :param volume_id: The updated volume id. - :param name: The volume name. - :param description: The volume description. - - :returns: The updated volume. - """ - return self._unify_volume(self._impl.update_volume( - volume_id, display_name=name, - display_description=description)) - - def list_types(self, search_opts=None, is_public=None): - """Lists all volume types.""" - return self._impl.list_types(search_opts=search_opts) - - def create_snapshot(self, volume_id, force=False, - name=None, description=None, metadata=None): - """Create one snapshot. - - Returns when the snapshot is actually created and is in the "Available" - state. - - :param volume_id: volume uuid for creating snapshot - :param force: If force is True, create a snapshot even if the volume is - attached to an instance. Default is False. - :param name: Name of the snapshot - :param description: Description of the snapshot - :param metadata: Metadata of the snapshot - :returns: Created snapshot object - """ - return self._unify_snapshot(self._impl.create_snapshot( - volume_id, force=force, display_name=name, - display_description=description)) - - def list_snapshots(self, detailed=True): - """Get a list of all snapshots.""" - return [self._unify_snapshot(snapshot) - for snapshot in self._impl.list_snapshots(detailed=detailed)] - - def create_backup(self, volume_id, container=None, - name=None, description=None, - incremental=False, force=False, - snapshot_id=None): - """Creates a volume backup. - - :param volume_id: The ID of the volume to backup. - :param container: The name of the backup service container. - :param name: The name of the backup. - :param description: The description of the backup. - :param incremental: Incremental backup. - :param force: If True, allows an in-use volume to be backed up. - :param snapshot_id: The ID of the snapshot to backup. - - :returns: The created backup object. - """ - return self._unify_backup(self._impl.create_backup( - volume_id, container=container, name=name, - description=description)) - - def create_volume_type(self, name=None, description=None, is_public=True): - """Creates a volume type. - - :param name: Descriptive name of the volume type - :param description: Description of the volume type - :param is_public: Volume type visibility - :returns: Return the created volume type. 
- """ - return self._impl.create_volume_type(name=name) - - def restore_backup(self, backup_id, volume_id=None): - """Restore the given backup. - - :param backup_id: The ID of the backup to restore. - :param volume_id: The ID of the volume to restore the backup to. - - :returns: Return the restored backup. - """ - return self._unify_volume(self._impl.restore_backup( - backup_id, volume_id=volume_id)) diff --git a/rally/plugins/openstack/services/storage/cinder_v2.py b/rally/plugins/openstack/services/storage/cinder_v2.py deleted file mode 100644 index f5a55052e6..0000000000 --- a/rally/plugins/openstack/services/storage/cinder_v2.py +++ /dev/null @@ -1,382 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import random - -from rally.common import utils as rutils -from rally.plugins.openstack import service -from rally.plugins.openstack.services.storage import block -from rally.plugins.openstack.services.storage import cinder_common -from rally.task import atomic - -CONF = block.CONF - - -@service.service("cinder", service_type="block-storage", version="2") -class CinderV2Service(service.Service, cinder_common.CinderMixin): - - @atomic.action_timer("cinder_v2.create_volume") - def create_volume(self, size, consistencygroup_id=None, - snapshot_id=None, source_volid=None, name=None, - description=None, volume_type=None, user_id=None, - project_id=None, availability_zone=None, - metadata=None, imageRef=None, scheduler_hints=None, - source_replica=None, multiattach=False): - """Creates a volume. - - :param size: Size of volume in GB - :param consistencygroup_id: ID of the consistencygroup - :param snapshot_id: ID of the snapshot - :param name: Name of the volume - :param description: Description of the volume - :param volume_type: Type of volume - :param user_id: User id derived from context - :param project_id: Project id derived from context - :param availability_zone: Availability Zone to use - :param metadata: Optional metadata to set on volume creation - :param imageRef: reference to an image stored in glance - :param source_volid: ID of source volume to clone from - :param source_replica: ID of source volume to clone replica - :param scheduler_hints: (optional extension) arbitrary key-value pairs - specified by the client to help boot an instance - :param multiattach: Allow the volume to be attached to more than - one instance - - :returns: Return a new volume. 
- """ - kwargs = {"name": name or self.generate_random_name(), - "description": description, - "consistencygroup_id": consistencygroup_id, - "snapshot_id": snapshot_id, - "source_volid": source_volid, - "volume_type": volume_type, - "user_id": user_id, - "project_id": project_id, - "availability_zone": availability_zone, - "metadata": metadata, - "imageRef": imageRef, - "scheduler_hints": scheduler_hints, - "source_replica": source_replica, - "multiattach": multiattach} - if isinstance(size, dict): - size = random.randint(size["min"], size["max"]) - - volume = (self._get_client() - .volumes.create(size, **kwargs)) - - # NOTE(msdubov): It is reasonable to wait 5 secs before starting to - # check whether the volume is ready => less API calls. - rutils.interruptable_sleep( - CONF.openstack.cinder_volume_create_prepoll_delay) - - return self._wait_available_volume(volume) - - @atomic.action_timer("cinder_v2.update_volume") - def update_volume(self, volume_id, name=None, description=None): - """Update the name or description for a volume. - - :param volume_id: The updated volume id. - :param name: The volume name. - :param description: The volume description. - - :returns: The updated volume. - """ - kwargs = {} - if name is not None: - kwargs["name"] = name - if description is not None: - kwargs["description"] = description - updated_volume = self._get_client().volumes.update( - volume_id, **kwargs) - return updated_volume["volume"] - - @atomic.action_timer("cinder_v2.list_types") - def list_types(self, search_opts=None, is_public=None): - """Lists all volume types.""" - return (self._get_client() - .volume_types.list(search_opts=search_opts, - is_public=is_public)) - - @atomic.action_timer("cinder_v2.create_snapshot") - def create_snapshot(self, volume_id, force=False, - name=None, description=None, metadata=None): - """Create one snapshot. - - Returns when the snapshot is actually created and is in the "Available" - state. - - :param volume_id: volume uuid for creating snapshot - :param force: flag to indicate whether to snapshot a volume even if - it's attached to an instance - :param name: Name of the snapshot - :param description: Description of the snapshot - :returns: Created snapshot object - """ - kwargs = {"force": force, - "name": name or self.generate_random_name(), - "description": description, - "metadata": metadata} - - snapshot = self._get_client().volume_snapshots.create(volume_id, - **kwargs) - rutils.interruptable_sleep( - CONF.openstack.cinder_volume_create_prepoll_delay) - snapshot = self._wait_available_volume(snapshot) - return snapshot - - @atomic.action_timer("cinder_v2.create_backup") - def create_backup(self, volume_id, container=None, - name=None, description=None, - incremental=False, force=False, - snapshot_id=None): - """Create a volume backup of the given volume. - - :param volume_id: The ID of the volume to backup. - :param container: The name of the backup service container. - :param name: The name of the backup. - :param description: The description of the backup. - :param incremental: Incremental backup. - :param force: If True, allows an in-use volume to be backed up. - :param snapshot_id: The ID of the snapshot to backup. 
- """ - kwargs = {"force": force, - "name": name or self.generate_random_name(), - "description": description, - "container": container, - "incremental": incremental, - "force": force, - "snapshot_id": snapshot_id} - backup = self._get_client().backups.create(volume_id, **kwargs) - return self._wait_available_volume(backup) - - @atomic.action_timer("cinder_v2.create_volume_type") - def create_volume_type(self, name=None, description=None, is_public=True): - """create volume type. - - :param name: Descriptive name of the volume type - :param description: Description of the volume type - :param is_public: Volume type visibility - :returns: Return the created volume type. - :returns: VolumeType object - """ - kwargs = {"name": name or self.generate_random_name(), - "description": description, - "is_public": is_public} - return self._get_client().volume_types.create(**kwargs) - - @atomic.action_timer("cinder_v2.update_volume_type") - def update_volume_type(self, volume_type, name=None, - description=None, is_public=None): - """Update the name and/or description for a volume type. - - :param volume_type: The ID or an instance of the :class:`VolumeType` - to update. - :param name: if None, updates name by generating random name. - else updates name with provided name - :param description: Description of the volume type. - :rtype: :class:`VolumeType` - """ - name = name or self.generate_random_name() - - return self._get_client().volume_types.update(volume_type, name, - description, is_public) - - @atomic.action_timer("cinder_v2.add_type_access") - def add_type_access(self, volume_type, project): - """Add a project to the given volume type access list. - - :param volume_type: Volume type name or ID to add access for the given - project - :project: Project ID to add volume type access for - :return: An instance of cinderclient.apiclient.base.TupleWithMeta - """ - return self._get_client().volume_type_access.add_project_access( - volume_type, project) - - @atomic.action_timer("cinder_v2.list_type_access") - def list_type_access(self, volume_type): - """Print access information about the given volume type - - :param volume_type: Filter results by volume type name or ID - :return: VolumeTypeAcces of specific project - """ - return self._get_client().volume_type_access.list(volume_type) - - -@service.compat_layer(CinderV2Service) -class UnifiedCinderV2Service(cinder_common.UnifiedCinderMixin, - block.BlockStorage): - - @staticmethod - def _unify_volume(volume): - if isinstance(volume, dict): - return block.Volume(id=volume["id"], name=volume["name"], - size=volume["size"], status=volume["status"]) - else: - return block.Volume(id=volume.id, name=volume.name, - size=volume.size, status=volume.status) - - @staticmethod - def _unify_snapshot(snapshot): - return block.VolumeSnapshot(id=snapshot.id, name=snapshot.name, - volume_id=snapshot.volume_id, - status=snapshot.status) - - def create_volume(self, size, consistencygroup_id=None, - group_id=None, snapshot_id=None, source_volid=None, - name=None, description=None, - volume_type=None, user_id=None, - project_id=None, availability_zone=None, - metadata=None, imageRef=None, scheduler_hints=None, - source_replica=None, multiattach=False): - """Creates a volume. 
- - :param size: Size of volume in GB - :param consistencygroup_id: ID of the consistencygroup - :param group_id: ID of the group - :param snapshot_id: ID of the snapshot - :param name: Name of the volume - :param description: Description of the volume - :param volume_type: Type of volume - :param user_id: User id derived from context - :param project_id: Project id derived from context - :param availability_zone: Availability Zone to use - :param metadata: Optional metadata to set on volume creation - :param imageRef: reference to an image stored in glance - :param source_volid: ID of source volume to clone from - :param source_replica: ID of source volume to clone replica - :param scheduler_hints: (optional extension) arbitrary key-value pairs - specified by the client to help boot an instance - :param multiattach: Allow the volume to be attached to more than - one instance - - :returns: Return a new volume. - """ - return self._unify_volume(self._impl.create_volume( - size, consistencygroup_id=consistencygroup_id, - snapshot_id=snapshot_id, - source_volid=source_volid, name=name, - description=description, volume_type=volume_type, - user_id=user_id, project_id=project_id, - availability_zone=availability_zone, metadata=metadata, - imageRef=imageRef, scheduler_hints=scheduler_hints, - source_replica=source_replica, multiattach=multiattach)) - - def list_volumes(self, detailed=True): - """Lists all volumes. - - :param detailed: Whether to return detailed volume info. - :returns: Return volumes list. - """ - return [self._unify_volume(volume) - for volume in self._impl.list_volumes(detailed=detailed)] - - def get_volume(self, volume_id): - """Get a volume. - - :param volume_id: The ID of the volume to get. - - :returns: Return the volume. - """ - return self._unify_volume(self._impl.get_volume(volume_id)) - - def extend_volume(self, volume, new_size): - """Extend the size of the specified volume.""" - return self._unify_volume( - self._impl.extend_volume(volume, new_size=new_size)) - - def update_volume(self, volume_id, - name=None, description=None): - """Update the name or description for a volume. - - :param volume_id: The updated volume id. - :param name: The volume name. - :param description: The volume description. - - :returns: The updated volume. - """ - return self._unify_volume(self._impl.update_volume( - volume_id, name=name, description=description)) - - def list_types(self, search_opts=None, is_public=None): - """Lists all volume types.""" - return self._impl.list_types(search_opts=search_opts, - is_public=is_public) - - def create_snapshot(self, volume_id, force=False, - name=None, description=None, metadata=None): - """Create one snapshot. - - Returns when the snapshot is actually created and is in the "Available" - state. - - :param volume_id: volume uuid for creating snapshot - :param force: If force is True, create a snapshot even if the volume is - attached to an instance. Default is False. 
- :param name: Name of the snapshot - :param description: Description of the snapshot - :param metadata: Metadata of the snapshot - :returns: Created snapshot object - """ - return self._unify_snapshot(self._impl.create_snapshot( - volume_id, force=force, name=name, - description=description, metadata=metadata)) - - def list_snapshots(self, detailed=True): - """Get a list of all snapshots.""" - return [self._unify_snapshot(snapshot) - for snapshot in self._impl.list_snapshots(detailed=detailed)] - - def create_backup(self, volume_id, container=None, - name=None, description=None, - incremental=False, force=False, - snapshot_id=None): - """Creates a volume backup. - - :param volume_id: The ID of the volume to backup. - :param container: The name of the backup service container. - :param name: The name of the backup. - :param description: The description of the backup. - :param incremental: Incremental backup. - :param force: If True, allows an in-use volume to be backed up. - :param snapshot_id: The ID of the snapshot to backup. - - :returns: The created backup object. - """ - return self._unify_backup(self._impl.create_backup( - volume_id, container=container, name=name, description=description, - incremental=incremental, force=force, snapshot_id=snapshot_id)) - - def create_volume_type(self, name=None, description=None, is_public=True): - """Creates a volume type. - - :param name: Descriptive name of the volume type - :param description: Description of the volume type - :param is_public: Volume type visibility - :returns: Return the created volume type. - """ - return self._impl.create_volume_type(name=name, - description=description, - is_public=is_public) - - def restore_backup(self, backup_id, volume_id=None): - """Restore the given backup. - - :param backup_id: The ID of the backup to restore. - :param volume_id: The ID of the volume to restore the backup to. - - :returns: Return the restored backup. - """ - return self._unify_volume(self._impl.restore_backup( - backup_id, volume_id=volume_id)) diff --git a/rally/plugins/openstack/types.py b/rally/plugins/openstack/types.py deleted file mode 100644 index da72308d34..0000000000 --- a/rally/plugins/openstack/types.py +++ /dev/null @@ -1,196 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
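Because UnifiedCinderV1Service and UnifiedCinderV2Service expose the same BlockStorage surface (create_volume, create_backup, restore_backup, and so on), scenario code can be written once against that surface and run regardless of which version the compat layer wraps. The sketch below only illustrates that call pattern; FakeBlockStorage is an editor-invented in-memory stub, not a Rally service, and it makes no OpenStack calls.

import itertools

class FakeBlockStorage(object):
    """Implements only the two unified entry points exercised below."""

    def __init__(self):
        self._ids = itertools.count(1)
        self.volumes = {}

    def create_volume(self, size, name=None, **kwargs):
        vol_id = str(next(self._ids))
        vol = {"id": vol_id, "name": name or "vol-%s" % vol_id,
               "size": size, "status": "available"}
        self.volumes[vol_id] = vol
        return vol

    def create_backup(self, volume_id, **kwargs):
        return {"volume_id": volume_id, "status": "available"}

def create_and_backup(storage, sizes):
    # Relies only on the unified create_volume/create_backup entry points,
    # so the same logic would apply whichever Cinder version sits underneath.
    volumes = [storage.create_volume(size) for size in sizes]
    smallest = min(volumes, key=lambda v: v["size"])
    return storage.create_backup(smallest["id"])

print(create_and_backup(FakeBlockStorage(), [3, 1, 2]))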
- -import copy - -from rally.common import logging -from rally.common.plugin import plugin -from rally import exceptions -from rally.plugins.openstack import osclients -from rally.plugins.openstack.services.image import image -from rally.plugins.openstack.services.storage import block -from rally.task import types - - -LOG = logging.getLogger(__name__) - - -class OpenStackResourceType(types.ResourceType): - """A base class for OpenStack ResourceTypes plugins with help-methods""" - def __init__(self, context, cache=None): - super(OpenStackResourceType, self).__init__(context, cache) - self._clients = None - if self._context.get("admin"): - self._clients = osclients.Clients( - self._context["admin"]["credential"]) - elif self._context.get("users"): - self._clients = osclients.Clients( - self._context["users"][0]["credential"]) - - -@plugin.configure(name="nova_flavor") -class Flavor(OpenStackResourceType, types.DeprecatedBehaviourMixin): - """Find Nova's flavor ID by name or regexp.""" - - def pre_process(self, resource_spec, config): - resource_id = resource_spec.get("id") - if not resource_id: - novaclient = self._clients.nova() - resource_id = types._id_from_name( - resource_config=resource_spec, - resources=novaclient.flavors.list(), - typename="flavor") - return resource_id - - -@plugin.configure(name="ec2_flavor") -class EC2Flavor(OpenStackResourceType, types.DeprecatedBehaviourMixin): - """Find Nova's flavor Name by it's ID or regexp.""" - - def pre_process(self, resource_spec, config): - resource_name = resource_spec.get("name") - if not resource_name: - # NOTE(wtakase): gets resource name from OpenStack id - novaclient = self._clients.nova() - resource_name = types._name_from_id( - resource_config=resource_spec, - resources=novaclient.flavors.list(), - typename="flavor") - return resource_name - - -@plugin.configure(name="glance_image") -class GlanceImage(OpenStackResourceType, types.DeprecatedBehaviourMixin): - """Find Glance's image ID by name or regexp.""" - - def pre_process(self, resource_spec, config): - resource_id = resource_spec.get("id") - list_kwargs = resource_spec.get("list_kwargs", {}) - - if not resource_id: - cache_id = hash(frozenset(list_kwargs.items())) - if cache_id not in self._cache: - glance = image.Image(self._clients) - self._cache[cache_id] = glance.list_images(**list_kwargs) - images = self._cache[cache_id] - resource_id = types._id_from_name( - resource_config=resource_spec, - resources=images, - typename="image") - return resource_id - - -@plugin.configure(name="glance_image_args") -class GlanceImageArguments(OpenStackResourceType, - types.DeprecatedBehaviourMixin): - """Process Glance image create options to look similar in case of V1/V2.""" - def pre_process(self, resource_spec, config): - resource_spec = copy.deepcopy(resource_spec) - if "is_public" in resource_spec: - if "visibility" in resource_spec: - resource_spec.pop("is_public") - else: - visibility = ("public" if resource_spec.pop("is_public") - else "private") - resource_spec["visibility"] = visibility - return resource_spec - - -@plugin.configure(name="ec2_image") -class EC2Image(OpenStackResourceType, types.DeprecatedBehaviourMixin): - """Find EC2 image ID.""" - - def pre_process(self, resource_spec, config): - if "name" not in resource_spec and "regex" not in resource_spec: - # NOTE(wtakase): gets resource name from OpenStack id - glanceclient = self._clients.glance() - resource_name = types._name_from_id( - resource_config=resource_spec, - resources=list(glanceclient.images.list()), - 
typename="image") - resource_spec["name"] = resource_name - - # NOTE(wtakase): gets EC2 resource id from name or regex - ec2client = self._clients.ec2() - resource_ec2_id = types._id_from_name( - resource_config=resource_spec, - resources=list(ec2client.get_all_images()), - typename="ec2_image") - return resource_ec2_id - - -@plugin.configure(name="cinder_volume_type") -class VolumeType(OpenStackResourceType, types.DeprecatedBehaviourMixin): - """Find Cinder volume type ID by name or regexp.""" - - def pre_process(self, resource_spec, config): - resource_id = resource_spec.get("id") - if not resource_id: - cinder = block.BlockStorage(self._clients) - resource_id = types._id_from_name( - resource_config=resource_spec, - resources=cinder.list_types(), - typename="volume_type") - return resource_id - - -@plugin.configure(name="neutron_network") -class NeutronNetwork(OpenStackResourceType, types.DeprecatedBehaviourMixin): - """Find Neutron network ID by it's name.""" - def pre_process(self, resource_spec, config): - resource_id = resource_spec.get("id") - if resource_id: - return resource_id - else: - neutronclient = self._clients.neutron() - for net in neutronclient.list_networks()["networks"]: - if net["name"] == resource_spec.get("name"): - return net["id"] - - raise exceptions.InvalidScenarioArgument( - "Neutron network with name '{name}' not found".format( - name=resource_spec.get("name"))) - - -@plugin.configure(name="watcher_strategy") -class WatcherStrategy(OpenStackResourceType, types.DeprecatedBehaviourMixin): - """Find Watcher strategy ID by it's name.""" - - def pre_process(self, resource_spec, config): - resource_id = resource_spec.get("id") - if not resource_id: - watcherclient = self._clients.watcher() - resource_id = types._id_from_name( - resource_config=resource_spec, - resources=[watcherclient.strategy.get( - resource_spec.get("name"))], - typename="strategy", - id_attr="uuid") - return resource_id - - -@plugin.configure(name="watcher_goal") -class WatcherGoal(OpenStackResourceType, types.DeprecatedBehaviourMixin): - """Find Watcher goal ID by it's name.""" - - def pre_process(self, resource_spec, config): - resource_id = resource_spec.get("id") - if not resource_id: - watcherclient = self._clients.watcher() - resource_id = types._id_from_name( - resource_config=resource_spec, - resources=[watcherclient.goal.get(resource_spec.get("name"))], - typename="goal", - id_attr="uuid") - return resource_id diff --git a/rally/plugins/openstack/validators.py b/rally/plugins/openstack/validators.py deleted file mode 100644 index 8a2f55cc16..0000000000 --- a/rally/plugins/openstack/validators.py +++ /dev/null @@ -1,621 +0,0 @@ -# Copyright 2017: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
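Most of the ResourceType plugins above reduce to the same operation: take a resource_spec carrying either a literal name or a regex, scan a listing call, and return the id of the single matching resource (what they delegate to types._id_from_name). A rough, self-contained re-implementation of that lookup is sketched below; resolve_id and the tiny Resource class are editor-invented illustrations, not the actual Rally helper.

import re

class Resource(object):
    def __init__(self, id, name):
        self.id = id
        self.name = name

def resolve_id(resource_spec, resources, typename):
    """Return the id of the single resource matching ``name`` or ``regex``."""
    name = resource_spec.get("name")
    pattern = resource_spec.get("regex")
    if name is not None:
        matches = [r for r in resources if r.name == name]
    elif pattern is not None:
        matches = [r for r in resources if re.search(pattern, r.name)]
    else:
        raise ValueError("%s spec needs 'name' or 'regex'" % typename)
    if len(matches) != 1:
        raise ValueError("expected exactly one %s, found %d"
                         % (typename, len(matches)))
    return matches[0].id

flavors = [Resource("1", "m1.tiny"), Resource("2", "m1.small")]
print(resolve_id({"regex": "^m1.tiny$"}, flavors, "flavor"))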
- -import inspect -import os -import re -import six - -from glanceclient import exc as glance_exc -from novaclient import exceptions as nova_exc -from rally.task import types - -from rally.common import logging -from rally.common import validation -from rally.common import yamlutils as yaml -from rally import consts -from rally import exceptions -from rally.plugins.common import validators -from rally.plugins.openstack.context.keystone import roles -from rally.plugins.openstack.context.nova import flavors as flavors_ctx -from rally.plugins.openstack import types as openstack_types - -LOG = logging.getLogger(__name__) - - -def with_roles_ctx(): - """Add roles to users for validate - - """ - def decorator(func): - def wrapper(*args, **kw): - func_type = inspect.getcallargs(func, *args, **kw) - config = func_type.get("config", {}) - context = func_type.get("context", {}) - if config.get("contexts", {}).get("roles") \ - and context.get("admin", {}): - context["config"] = config["contexts"] - rolegenerator = roles.RoleGenerator(context) - with rolegenerator: - rolegenerator.setup() - func(*args, **kw) - else: - func(*args, **kw) - return wrapper - return decorator - - -@validation.add("required_platform", platform="openstack", users=True) -@validation.configure(name="image_exists", platform="openstack") -class ImageExistsValidator(validation.Validator): - - def __init__(self, param_name, nullable): - """Validator checks existed image or not - - :param param_name: defines which variable should be used - to get image id value. - :param nullable: defines image id param is required - """ - super(ImageExistsValidator, self).__init__() - self.param_name = param_name - self.nullable = nullable - - @with_roles_ctx() - def validate(self, context, config, plugin_cls, plugin_cfg): - - image_args = config.get("args", {}).get(self.param_name) - - if not image_args and self.nullable: - return - - image_context = config.get("contexts", {}).get("images", {}) - image_ctx_name = image_context.get("image_name") - - if not image_args: - self.fail("Parameter %s is not specified." % self.param_name) - - if "image_name" in image_context: - # NOTE(rvasilets) check string is "exactly equal to" a regex - # or image name from context equal to image name from args - if "regex" in image_args: - match = re.match(image_args.get("regex"), image_ctx_name) - if image_ctx_name == image_args.get("name") or ( - "regex" in image_args and match): - return - try: - for user in context["users"]: - image_processor = openstack_types.GlanceImage( - context={"admin": {"credential": user["credential"]}}) - image_id = image_processor.pre_process(image_args, config={}) - user["credential"].clients().glance().images.get(image_id) - except (glance_exc.HTTPNotFound, exceptions.InvalidScenarioArgument): - self.fail("Image '%s' not found" % image_args) - - -@validation.add("required_platform", platform="openstack", users=True) -@validation.configure(name="external_network_exists", platform="openstack") -class ExternalNetworkExistsValidator(validation.Validator): - - def __init__(self, param_name): - """Validator checks that external network with given name exists. 
- - :param param_name: name of validated network - """ - super(ExternalNetworkExistsValidator, self).__init__() - self.param_name = param_name - - @with_roles_ctx() - def validate(self, context, config, plugin_cls, plugin_cfg): - - ext_network = config.get("args", {}).get(self.param_name) - if not ext_network: - return - - result = [] - for user in context["users"]: - creds = user["credential"] - - networks = creds.clients().neutron().list_networks()["networks"] - external_networks = [net["name"] for net in networks if - net.get("router:external", False)] - if ext_network not in external_networks: - message = ("External (floating) network with name {1} " - "not found by user {0}. " - "Available networks: {2}").format(creds.username, - ext_network, - networks) - result.append(message) - if result: - self.fail("\n".join(result)) - - -@validation.add("required_platform", platform="openstack", users=True) -@validation.configure(name="required_neutron_extensions", platform="openstack") -class RequiredNeutronExtensionsValidator(validation.Validator): - - def __init__(self, extensions, *args): - """Validator checks if the specified Neutron extension is available - - :param extensions: list of Neutron extensions - """ - super(RequiredNeutronExtensionsValidator, self).__init__() - if isinstance(extensions, (list, tuple)): - # services argument is a list, so it is a new way of validators - # usage, args in this case should not be provided - self.req_ext = extensions - if args: - LOG.warning("Positional argument is not what " - "'required_neutron_extensions' decorator expects. " - "Use `extensions` argument instead") - else: - # it is old way validator - self.req_ext = [extensions] - self.req_ext.extend(args) - - @with_roles_ctx() - def validate(self, context, config, plugin_cls, plugin_cfg): - clients = context["users"][0]["credential"].clients() - extensions = clients.neutron().list_extensions()["extensions"] - aliases = [x["alias"] for x in extensions] - for extension in self.req_ext: - if extension not in aliases: - self.fail("Neutron extension %s is not configured" % extension) - - -@validation.add("required_platform", platform="openstack", users=True) -@validation.configure(name="flavor_exists", platform="openstack") -class FlavorExistsValidator(validation.Validator): - - def __init__(self, param_name): - """Returns validator for flavor - - :param param_name: defines which variable should be used - to get flavor id value. - """ - super(FlavorExistsValidator, self).__init__() - - self.param_name = param_name - - def _get_flavor_from_context(self, config, flavor_value): - if "flavors" not in config.get("contexts", {}): - self.fail("No flavors context") - - flavors = [flavors_ctx.FlavorConfig(**f) - for f in config["contexts"]["flavors"]] - resource = types.obj_from_name(resource_config=flavor_value, - resources=flavors, typename="flavor") - flavor = flavors_ctx.FlavorConfig(**resource) - flavor.id = "" % flavor.name - return flavor - - def _get_validated_flavor(self, config, clients, param_name): - flavor_value = config.get("args", {}).get(param_name) - if not flavor_value: - self.fail("Parameter %s is not specified." 
% param_name) - try: - flavor_processor = openstack_types.Flavor( - context={"admin": {"credential": clients.credential}}) - flavor_id = flavor_processor.pre_process(flavor_value, config={}) - flavor = clients.nova().flavors.get(flavor=flavor_id) - return flavor - except (nova_exc.NotFound, exceptions.InvalidScenarioArgument): - try: - return self._get_flavor_from_context(config, flavor_value) - except validation.ValidationError: - pass - self.fail("Flavor '%s' not found" % flavor_value) - - @with_roles_ctx() - def validate(self, context, config, plugin_cls, plugin_cfg): - # flavors do not depend on user or tenant, so checking for one user - # should be enough - clients = context["users"][0]["credential"].clients() - self._get_validated_flavor(config=config, - clients=clients, - param_name=self.param_name) - - -@validation.add("required_platform", platform="openstack", users=True) -@validation.configure(name="image_valid_on_flavor", platform="openstack") -class ImageValidOnFlavorValidator(FlavorExistsValidator): - - def __init__(self, flavor_param, image_param, - fail_on_404_image=True, validate_disk=True): - """Returns validator for image could be used for current flavor - - :param flavor_param: defines which variable should be used - to get flavor id value. - :param image_param: defines which variable should be used - to get image id value. - :param validate_disk: flag to indicate whether to validate flavor's - disk. Should be True if instance is booted from - image. Should be False if instance is booted - from volume. Default value is True. - :param fail_on_404_image: flag what indicate whether to validate image - or not. - """ - super(ImageValidOnFlavorValidator, self).__init__(flavor_param) - self.image_name = image_param - self.fail_on_404_image = fail_on_404_image - self.validate_disk = validate_disk - - def _get_validated_image(self, config, clients, param_name): - image_context = config.get("contexts", {}).get("images", {}) - image_args = config.get("args", {}).get(param_name) - image_ctx_name = image_context.get("image_name") - - if not image_args: - self.fail("Parameter %s is not specified." 
% param_name) - - if "image_name" in image_context: - # NOTE(rvasilets) check string is "exactly equal to" a regex - # or image name from context equal to image name from args - if "regex" in image_args: - match = re.match(image_args.get("regex"), image_ctx_name) - if image_ctx_name == image_args.get("name") or ("regex" - in image_args - and match): - image = { - "size": image_context.get("min_disk", 0), - "min_ram": image_context.get("min_ram", 0), - "min_disk": image_context.get("min_disk", 0) - } - return image - try: - image_processor = openstack_types.GlanceImage( - context={"admin": {"credential": clients.credential}}) - image_id = image_processor.pre_process(image_args, config={}) - image = clients.glance().images.get(image_id) - if hasattr(image, "to_dict"): - # NOTE(stpierre): Glance v1 images are objects that can be - # converted to dicts; Glance v2 images are already - # dict-like - image = image.to_dict() - if not image.get("size"): - image["size"] = 0 - if not image.get("min_ram"): - image["min_ram"] = 0 - if not image.get("min_disk"): - image["min_disk"] = 0 - return image - except (glance_exc.HTTPNotFound, exceptions.InvalidScenarioArgument): - self.fail("Image '%s' not found" % image_args) - - @with_roles_ctx() - def validate(self, context, config, plugin_cls, plugin_cfg): - - flavor = None - for user in context["users"]: - clients = user["credential"].clients() - - if not flavor: - flavor = self._get_validated_flavor( - config, clients, self.param_name) - - try: - image = self._get_validated_image(config, clients, - self.image_name) - except validation.ValidationError: - if not self.fail_on_404_image: - return - raise - - if flavor.ram < image["min_ram"]: - self.fail("The memory size for flavor '%s' is too small " - "for requested image '%s'." % - (flavor.id, image["id"])) - - if flavor.disk and self.validate_disk: - if flavor.disk * (1024 ** 3) < image["size"]: - self.fail("The disk size for flavor '%s' is too small " - "for requested image '%s'." % - (flavor.id, image["id"])) - - if flavor.disk < image["min_disk"]: - self.fail("The minimal disk size for flavor '%s' is " - "too small for requested image '%s'." % - (flavor.id, image["id"])) - - -@validation.add("required_platform", platform="openstack", users=True) -@validation.configure(name="required_clients", platform="openstack") -class RequiredClientsValidator(validation.Validator): - - def __init__(self, components, *args, **kwargs): - """Validator checks if specified OpenStack clients are available. - - :param components: list of client components names - :param **kwargs: optional parameters: - admin - bool, whether to use admin clients - """ - super(RequiredClientsValidator, self).__init__() - if isinstance(components, (list, tuple)): - # services argument is a list, so it is a new way of validators - # usage, args in this case should not be provided - self.components = components - if args: - LOG.warning("Positional argument is not what " - "'required_clients' decorator expects. " - "Use `components` argument instead") - else: - # it is old way validator - self.components = [components] - self.components.extend(args) - self.options = kwargs - - def _check_component(self, clients): - for client_component in self.components: - try: - getattr(clients, client_component)() - except ImportError: - self.fail( - "Client for {0} is not installed. 
To install it run " - "`pip install python-{0}client`".format(client_component)) - - def validate(self, context, config, plugin_cls, plugin_cfg): - LOG.warning("The validator 'required_clients' is deprecated since " - "Rally 0.10.0. If you are interested in it, please " - "contact Rally team via E-mail, IRC or Gitter (see " - "https://rally.readthedocs.io/en/latest/project_info" - "/index.html#where-can-i-discuss-and-propose-changes for " - "more details).") - if self.options.get("admin", False): - clients = context["admin"]["credential"].clients() - self._check_component(clients) - else: - for user in context["users"]: - clients = user["credential"].clients() - self._check_component(clients) - break - - -@validation.add("required_platform", platform="openstack", users=True) -@validation.configure(name="required_services", platform="openstack") -class RequiredServicesValidator(validation.Validator): - - def __init__(self, services, *args): - """Validator checks if specified OpenStack services are available. - - :param services: list with names of required services - """ - - super(RequiredServicesValidator, self).__init__() - if isinstance(services, (list, tuple)): - # services argument is a list, so it is a new way of validators - # usage, args in this case should not be provided - self.services = services - if args: - LOG.warning("Positional argument is not what " - "'required_services' decorator expects. " - "Use `services` argument instead") - else: - # it is old way validator - self.services = [services] - self.services.extend(args) - - def validate(self, context, config, plugin_cls, plugin_cfg): - creds = (context.get("admin", {}).get("credential", None) - or context["users"][0]["credential"]) - - available_services = creds.clients().services().values() - if consts.Service.NOVA_NET in self.services: - LOG.warning("We are sorry, but Nova-network was deprecated for " - "a long time and latest novaclient doesn't support " - "it, so we too.") - - for service in self.services: - # NOTE(andreykurilin): validator should ignore services configured - # via context(a proper validation should be in context) - service_config = config.get("contexts", {}).get( - "api_versions@openstack", {}).get(service, {}) - - if (service not in available_services and - not ("service_type" in service_config or - "service_name" in service_config)): - self.fail( - ("'{0}' service is not available. Hint: If '{0}' " - "service has non-default service_type, try to" - " setup it via 'api_versions'" - " context.").format(service)) - - -@validation.add("required_platform", platform="openstack", users=True) -@validation.configure(name="validate_heat_template", platform="openstack") -class ValidateHeatTemplateValidator(validation.Validator): - - def __init__(self, params, *args): - """Validates heat template. - - :param params: list of parameters to be validated. - """ - super(ValidateHeatTemplateValidator, self).__init__() - if isinstance(params, (list, tuple)): - # services argument is a list, so it is a new way of validators - # usage, args in this case should not be provided - self.params = params - if args: - LOG.warning("Positional argument is not what " - "'validate_heat_template' decorator expects. 
" - "Use `params` argument instead") - else: - # it is old way validator - self.params = [params] - self.params.extend(args) - - @with_roles_ctx() - def validate(self, context, config, plugin_cls, plugin_cfg): - - for param_name in self.params: - template_path = config.get("args", {}).get(param_name) - if not template_path: - msg = ("Path to heat template is not specified. Its needed " - "for heat template validation. Please check the " - "content of `{}` scenario argument.") - - return self.fail(msg.format(param_name)) - template_path = os.path.expanduser(template_path) - if not os.path.exists(template_path): - self.fail("No file found by the given path %s" % template_path) - with open(template_path, "r") as f: - try: - for user in context["users"]: - clients = user["credential"].clients() - clients.heat().stacks.validate(template=f.read()) - except Exception as e: - self.fail("Heat template validation failed on %(path)s. " - "Original error message: %(msg)s." % - {"path": template_path, "msg": str(e)}) - - -@validation.add("required_platform", platform="openstack", admin=True) -@validation.configure(name="required_cinder_services", platform="openstack") -class RequiredCinderServicesValidator(validation.Validator): - - def __init__(self, services): - """Validator checks that specified Cinder service is available. - - It uses Cinder client with admin permissions to call - 'cinder service-list' call - - :param services: Cinder service name - """ - super(RequiredCinderServicesValidator, self).__init__() - self.services = services - - @with_roles_ctx() - def validate(self, context, config, plugin_cls, plugin_cfg): - - clients = context["admin"]["credential"].clients() - for service in clients.cinder().services.list(): - if (service.binary == six.text_type(self.services) - and service.state == six.text_type("up")): - return - - self.fail("%s service is not available" % self.services) - - -@validation.add("required_platform", platform="openstack", users=True) -@validation.configure(name="required_api_versions", platform="openstack") -class RequiredAPIVersionsValidator(validation.Validator): - - def __init__(self, component, versions): - """Validator checks component API versions. 
- - :param component: name of required component - :param versions: version of required component - """ - super(RequiredAPIVersionsValidator, self).__init__() - self.component = component - self.versions = versions - - def validate(self, context, config, plugin_cls, plugin_cfg): - versions = [str(v) for v in self.versions] - versions_str = ", ".join(versions) - msg = ("Task was designed to be used with %(component)s " - "V%(version)s, but V%(found_version)s is " - "selected.") - for user in context["users"]: - clients = user["credential"].clients() - if self.component == "keystone": - if "2.0" not in versions and hasattr( - clients.keystone(), "tenants"): - self.fail(msg % {"component": self.component, - "version": versions_str, - "found_version": "2.0"}) - if "3" not in versions and hasattr( - clients.keystone(), "projects"): - self.fail(msg % {"component": self.component, - "version": versions_str, - "found_version": "3"}) - else: - av_ctx = config.get("contexts", {}).get( - "api_versions@openstack", {}) - default_version = getattr(clients, - self.component).choose_version() - used_version = av_ctx.get(self.component, {}).get( - "version", default_version) - if not used_version: - self.fail("Unable to determine the API version.") - if str(used_version) not in versions: - self.fail(msg % {"component": self.component, - "version": versions_str, - "found_version": used_version}) - - -@validation.add("required_platform", platform="openstack", users=True) -@validation.configure(name="volume_type_exists", platform="openstack") -class VolumeTypeExistsValidator(validation.Validator): - - def __init__(self, param_name, nullable=True): - """Returns validator for volume types. - - :param param_name: defines variable to be used as the flag to - determine if volume types should be checked for - existence. - :param nullable: defines volume_type param is required - """ - super(VolumeTypeExistsValidator, self).__init__() - self.param = param_name - self.nullable = nullable - - @with_roles_ctx() - def validate(self, context, config, plugin_cls, plugin_cfg): - volume_type = config.get("args", {}).get(self.param, False) - - if not volume_type: - if self.nullable: - return - - self.fail("The parameter '%s' is required and should not be empty." - % self.param) - - for user in context["users"]: - clients = user["credential"].clients() - vt_names = [vt.name for vt in - clients.cinder().volume_types.list()] - ctx = config.get("contexts", {}).get("volume_types", []) - vt_names += ctx - if volume_type not in vt_names: - self.fail("Specified volume type %s not found for user %s." 
- " List of available types: %s" % - (volume_type, user, vt_names)) - - -@validation.configure(name="workbook_contains_workflow", platform="openstack") -class WorkbookContainsWorkflowValidator(validators.FileExistsValidator): - - def __init__(self, workbook_param, workflow_param): - """Validate that workflow exist in workbook when workflow is passed - - :param workbook_param: parameter containing the workbook definition - :param workflow_param: parameter containing the workflow name - """ - super(WorkbookContainsWorkflowValidator, self).__init__(workflow_param) - self.workbook = workbook_param - self.workflow = workflow_param - - def validate(self, context, config, plugin_cls, plugin_cfg): - wf_name = config.get("args", {}).get(self.workflow) - if wf_name: - wb_path = config.get("args", {}).get(self.workbook) - wb_path = os.path.expanduser(wb_path) - self._file_access_ok(wb_path, mode=os.R_OK, - param_name=self.workbook) - - with open(wb_path, "r") as wb_def: - wb_def = yaml.safe_load(wb_def) - if wf_name not in wb_def["workflows"]: - self.fail("workflow '%s' not found in the definition '%s'" - % (wf_name, wb_def)) diff --git a/rally/plugins/openstack/verification/__init__.py b/rally/plugins/openstack/verification/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/verification/tempest/__init__.py b/rally/plugins/openstack/verification/tempest/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/verification/tempest/config.ini b/rally/plugins/openstack/verification/tempest/config.ini deleted file mode 100644 index 4d64e8f648..0000000000 --- a/rally/plugins/openstack/verification/tempest/config.ini +++ /dev/null @@ -1,59 +0,0 @@ -[DEFAULT] -debug = True -use_stderr = False -log_file = - -[auth] -use_dynamic_credentials = True - -[compute] -image_ref = -image_ref_alt = -flavor_ref = -flavor_ref_alt = -fixed_network_name = - -[compute-feature-enabled] -live_migration = False -resize = True -vnc_console = True -attach_encrypted_volume = False - -[data-processing] - -[identity] - -[identity-feature-enabled] - -[image-feature-enabled] -deactivate_image = True - -[input-scenario] -ssh_user_regex = [["^.*[Cc]irros.*$", "cirros"], ["^.*[Tt]est[VvMm].*$", "cirros"], ["^.*rally_verify.*$", "cirros"]] - -[network] - -[network-feature-enabled] -ipv6_subnet_attributes = True -ipv6 = True - -[object-storage] - -[oslo_concurrency] -lock_path = - -[orchestration] -instance_type = - -[scenario] -img_dir = -img_file = - -[service_available] - -[validation] -run_validation = True -image_ssh_user = cirros - -[volume-feature-enabled] -bootable = True diff --git a/rally/plugins/openstack/verification/tempest/config.py b/rally/plugins/openstack/verification/tempest/config.py deleted file mode 100644 index 4bf083877d..0000000000 --- a/rally/plugins/openstack/verification/tempest/config.py +++ /dev/null @@ -1,213 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import inspect -import os - -import six -from six.moves import configparser - -from rally.common import cfg -from rally.common import logging -from rally import exceptions -from rally.verification import utils - - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -class TempestConfigfileManager(object): - """Class to create a Tempest config file.""" - - def __init__(self, deployment): - self.credential = deployment.get_credentials_for("openstack")["admin"] - self.clients = self.credential.clients() - self.available_services = self.clients.services().values() - - self.conf = configparser.ConfigParser() - - def _get_service_type_by_service_name(self, service_name): - for s_type, s_name in self.clients.services().items(): - if s_name == service_name: - return s_type - - def _configure_auth(self, section_name="auth"): - self.conf.set(section_name, "admin_username", - self.credential.username) - self.conf.set(section_name, "admin_password", - self.credential.password) - self.conf.set(section_name, "admin_project_name", - self.credential.tenant_name) - # Keystone v3 related parameter - self.conf.set(section_name, "admin_domain_name", - self.credential.user_domain_name or "Default") - - # Sahara has two service types: 'data_processing' and 'data-processing'. - # 'data_processing' is deprecated, but it can be used in previous OpenStack - # releases. So we need to configure the 'catalog_type' option to support - # environments where 'data_processing' is used as service type for Sahara. - def _configure_data_processing(self, section_name="data-processing"): - if "sahara" in self.available_services: - self.conf.set(section_name, "catalog_type", - self._get_service_type_by_service_name("sahara")) - - def _configure_identity(self, section_name="identity"): - self.conf.set(section_name, "region", - self.credential.region_name) - # discover keystone versions - - def get_versions(auth_url): - from keystoneauth1 import discover - from keystoneauth1 import session - - temp_session = session.Session( - verify=(self.credential.https_cacert or - not self.credential.https_insecure), - timeout=CONF.openstack_client_http_timeout) - data = discover.Discover(temp_session, auth_url).version_data() - return dict([(v["version"][0], v["url"]) for v in data]) - - # check the original auth_url without cropping versioning to identify - # the default version - - versions = get_versions(self.credential.auth_url) - cropped_auth_url = self.clients.keystone._remove_url_version() - if cropped_auth_url == self.credential.auth_url: - # the given auth_url doesn't contain version - if set(versions.keys()) == {2, 3}: - # ok, both versions of keystone are enabled, we can take urls - # there - uri = versions[2] - uri_v3 = versions[3] - target_version = 3 - elif set(versions.keys()) == {2} or set(versions.keys()) == {3}: - # only one version is available while discovering - - # get the most recent version - target_version = sorted(versions.keys())[-1] - if target_version == 2: - uri = versions[2] - uri_v3 = os.path.join(cropped_auth_url, "v3") - else: - # keystone v2 is disabled. let's do it explicitly - self.conf.set("identity-feature-enabled", "api_v2", - "False") - uri_v3 = versions[3] - uri = os.path.join(cropped_auth_url, "v2.0") - else: - # Does Keystone released new version of API ?! 
- LOG.debug("Discovered keystone versions: %s" % versions) - raise exceptions.RallyException("Failed to discover keystone " - "auth urls.") - - else: - if self.credential.auth_url.rstrip("/").endswith("v2.0"): - uri = self.credential.auth_url - uri_v3 = uri.replace("/v2.0", "/v3") - target_version = 2 - else: - uri_v3 = self.credential.auth_url - uri = uri_v3.replace("/v3", "/v2.0") - target_version = 3 - - self.conf.set(section_name, "auth_version", "v%s" % target_version) - self.conf.set(section_name, "uri", uri) - self.conf.set(section_name, "uri_v3", uri_v3) - - self.conf.set(section_name, "disable_ssl_certificate_validation", - str(self.credential.https_insecure)) - self.conf.set(section_name, "ca_certificates_file", - self.credential.https_cacert) - - # The compute section is configured in context class for Tempest resources. - # Options which are configured there: 'image_ref', 'image_ref_alt', - # 'flavor_ref', 'flavor_ref_alt'. - - def _configure_network(self, section_name="network"): - if "neutron" in self.available_services: - neutronclient = self.clients.neutron() - public_nets = [net for net - in neutronclient.list_networks()["networks"] - if net["status"] == "ACTIVE" and - net["router:external"] is True] - if public_nets: - net_id = public_nets[0]["id"] - net_name = public_nets[0]["name"] - self.conf.set(section_name, "public_network_id", net_id) - self.conf.set(section_name, "floating_network_name", net_name) - else: - novaclient = self.clients.nova() - net_name = next(net.human_id for net in novaclient.networks.list() - if net.human_id is not None) - self.conf.set("compute", "fixed_network_name", net_name) - self.conf.set("validation", "network_for_ssh", net_name) - - def _configure_network_feature_enabled( - self, section_name="network-feature-enabled"): - if "neutron" in self.available_services: - neutronclient = self.clients.neutron() - extensions = neutronclient.list_ext("extensions", "/extensions", - retrieve_all=True) - aliases = [ext["alias"] for ext in extensions["extensions"]] - aliases_str = ",".join(aliases) - self.conf.set(section_name, "api_extensions", aliases_str) - - def _configure_object_storage(self, section_name="object-storage"): - self.conf.set(section_name, "operator_role", - CONF.openstack.swift_operator_role) - self.conf.set(section_name, "reseller_admin_role", - CONF.openstack.swift_reseller_admin_role) - - def _configure_service_available(self, section_name="service_available"): - services = ["cinder", "glance", "heat", "ironic", "neutron", "nova", - "sahara", "swift"] - for service in services: - # Convert boolean to string because ConfigParser fails - # on attempt to get option with boolean value - self.conf.set(section_name, service, - str(service in self.available_services)) - - def _configure_validation(self, section_name="validation"): - if "neutron" in self.available_services: - self.conf.set(section_name, "connect_method", "floating") - else: - self.conf.set(section_name, "connect_method", "fixed") - - def _configure_orchestration(self, section_name="orchestration"): - self.conf.set(section_name, "stack_owner_role", - CONF.openstack.heat_stack_owner_role) - self.conf.set(section_name, "stack_user_role", - CONF.openstack.heat_stack_user_role) - - def create(self, conf_path, extra_options=None): - self.conf.read(os.path.join(os.path.dirname(__file__), "config.ini")) - - for name, method in inspect.getmembers(self, inspect.ismethod): - if name.startswith("_configure_"): - method() - - if extra_options: - utils.add_extra_options(extra_options, 
self.conf) - - with open(conf_path, "w") as configfile: - self.conf.write(configfile) - - raw_conf = six.StringIO() - raw_conf.write("# Some empty values of options will be replaced while " - "creating required resources (images, flavors, etc).\n") - self.conf.write(raw_conf) - - return raw_conf.getvalue() diff --git a/rally/plugins/openstack/verification/tempest/consts.py b/rally/plugins/openstack/verification/tempest/consts.py deleted file mode 100644 index fa35bc2694..0000000000 --- a/rally/plugins/openstack/verification/tempest/consts.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright 2016: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import utils - - -class _TempestApiTestSets(utils.ImmutableMixin, utils.EnumMixin): - BAREMETAL = "baremetal" - CLUSTERING = "clustering" - COMPUTE = "compute" - DATA_PROCESSING = "data_processing" - DATABASE = "database" - IDENTITY = "identity" - IMAGE = "image" - MESSAGING = "messaging" - NETWORK = "network" - OBJECT_STORAGE = "object_storage" - ORCHESTRATION = "orchestration" - TELEMETRY = "telemetry" - VOLUME = "volume" - - -class _TempestScenarioTestSets(utils.ImmutableMixin, utils.EnumMixin): - SCENARIO = "scenario" - - -class _TempestTestSets(utils.ImmutableMixin, utils.EnumMixin): - FULL = "full" - SMOKE = "smoke" - - -TempestApiTestSets = _TempestApiTestSets() -TempestScenarioTestSets = _TempestScenarioTestSets() -TempestTestSets = _TempestTestSets() diff --git a/rally/plugins/openstack/verification/tempest/context.py b/rally/plugins/openstack/verification/tempest/context.py deleted file mode 100644 index 755a324044..0000000000 --- a/rally/plugins/openstack/verification/tempest/context.py +++ /dev/null @@ -1,331 +0,0 @@ -# Copyright 2017: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
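TempestConfigfileManager.create() above fills the bundled config.ini template by discovering every method whose name starts with "_configure_" via inspect.getmembers, calling each in turn, merging any extra options, and writing the result to disk. The following is a minimal, self-contained sketch of that dispatch pattern under hypothetical names (ConfigBuilder and its toy sections are placeholders, not Rally's API):

import inspect


class ConfigBuilder(object):
    """Toy example: call every method whose name starts with _configure_."""

    def __init__(self):
        self.sections = {}

    def _configure_identity(self):
        # In the real manager this section is filled from discovered
        # keystone endpoints; here we just record a static value.
        self.sections["identity"] = {"auth_version": "v3"}

    def _configure_network(self):
        self.sections["network"] = {"fixed_network_name": "private"}

    def create(self):
        for name, method in inspect.getmembers(self, inspect.ismethod):
            if name.startswith("_configure_"):
                method()
        return self.sections


print(ConfigBuilder().create())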
- -import os -import re - -import requests -from six.moves import configparser - -from rally.common import logging -from rally import exceptions -from rally.plugins.openstack.services.image import image -from rally.plugins.openstack.verification.tempest import config as conf -from rally.plugins.openstack.wrappers import network -from rally.task import utils as task_utils -from rally.verification import context -from rally.verification import utils - - -LOG = logging.getLogger(__name__) - - -@context.configure("tempest", order=900) -class TempestContext(context.VerifierContext): - """Context class to create/delete resources needed for Tempest.""" - - RESOURCE_NAME_FORMAT = "rally_verify_XXXXXXXX_XXXXXXXX" - - def __init__(self, ctx): - super(TempestContext, self).__init__(ctx) - - creds = self.verifier.deployment.get_credentials_for("openstack") - self.clients = creds["admin"].clients() - self.available_services = self.clients.services().values() - - self.conf = configparser.ConfigParser() - self.conf_path = self.verifier.manager.configfile - - self.data_dir = self.verifier.manager.home_dir - self.image_name = "tempest-image" - - self._created_roles = [] - self._created_images = [] - self._created_flavors = [] - self._created_networks = [] - - def setup(self): - self.conf.read(self.conf_path) - - utils.create_dir(self.data_dir) - - self._create_tempest_roles() - - self._configure_option("DEFAULT", "log_file", - os.path.join(self.data_dir, "tempest.log")) - self._configure_option("oslo_concurrency", "lock_path", - os.path.join(self.data_dir, "lock_files")) - self._configure_option("scenario", "img_dir", self.data_dir) - self._configure_option("scenario", "img_file", self.image_name, - helper_method=self._download_image) - self._configure_option("compute", "image_ref", - helper_method=self._discover_or_create_image) - self._configure_option("compute", "image_ref_alt", - helper_method=self._discover_or_create_image) - self._configure_option("compute", "flavor_ref", - helper_method=self._discover_or_create_flavor, - flv_ram=conf.CONF.openstack.flavor_ref_ram) - self._configure_option("compute", "flavor_ref_alt", - helper_method=self._discover_or_create_flavor, - flv_ram=conf.CONF.openstack.flavor_ref_alt_ram) - if "neutron" in self.available_services: - neutronclient = self.clients.neutron() - if neutronclient.list_networks(shared=True)["networks"]: - # If the OpenStack cloud has some shared networks, we will - # create our own shared network and specify its name in the - # Tempest config file. Such approach will allow us to avoid - # failures of Tempest tests with error "Multiple possible - # networks found". Otherwise the default behavior defined in - # Tempest will be used and Tempest itself will manage network - # resources. - LOG.debug("Shared networks found. " - "'fixed_network_name' option should be configured.") - self._configure_option( - "compute", "fixed_network_name", - helper_method=self._create_network_resources) - if "heat" in self.available_services: - self._configure_option( - "orchestration", "instance_type", - helper_method=self._discover_or_create_flavor, - flv_ram=conf.CONF.openstack.heat_instance_type_ram) - - with open(self.conf_path, "w") as configfile: - self.conf.write(configfile) - - def cleanup(self): - # Tempest tests may take more than 1 hour and we should remove all - # cached clients sessions to avoid tokens expiration when deleting - # Tempest resources. 
- self.clients.clear() - - self._cleanup_tempest_roles() - self._cleanup_images() - self._cleanup_flavors() - if "neutron" in self.available_services: - self._cleanup_network_resources() - - with open(self.conf_path, "w") as configfile: - self.conf.write(configfile) - - def _create_tempest_roles(self): - keystoneclient = self.clients.verified_keystone() - roles = [conf.CONF.openstack.swift_operator_role, - conf.CONF.openstack.swift_reseller_admin_role, - conf.CONF.openstack.heat_stack_owner_role, - conf.CONF.openstack.heat_stack_user_role] - existing_roles = set(role.name for role in keystoneclient.roles.list()) - - for role in roles: - if role not in existing_roles: - LOG.debug("Creating role '%s'." % role) - self._created_roles.append(keystoneclient.roles.create(role)) - - def _configure_option(self, section, option, value=None, - helper_method=None, *args, **kwargs): - option_value = self.conf.get(section, option) - if not option_value: - LOG.debug("Option '%s' from '%s' section is not configured." - % (option, section)) - if helper_method: - res = helper_method(*args, **kwargs) - if res: - value = res["name"] if "network" in option else res.id - LOG.debug("Setting value '%s' to option '%s'." % (value, option)) - self.conf.set(section, option, value) - LOG.debug("Option '{opt}' is configured. " - "{opt} = {value}".format(opt=option, value=value)) - else: - LOG.debug("Option '{opt}' is already configured " - "in Tempest config file. {opt} = {opt_val}" - .format(opt=option, opt_val=option_value)) - - def _discover_image(self): - LOG.debug("Trying to discover a public image with name matching " - "regular expression '%s'. Note that case insensitive " - "matching is performed." - % conf.CONF.openstack.img_name_regex) - image_service = image.Image(self.clients) - images = image_service.list_images(status="active", - visibility="public") - for image_obj in images: - if image_obj.name and re.match(conf.CONF.openstack.img_name_regex, - image_obj.name, re.IGNORECASE): - LOG.debug("The following public image discovered: '%s'." - % image_obj.name) - return image_obj - - LOG.debug("There is no public image with name matching regular " - "expression '%s'." % conf.CONF.openstack.img_name_regex) - - def _download_image_from_source(self, target_path, image=None): - if image: - LOG.debug("Downloading image '%s' from Glance to %s." - % (image.name, target_path)) - with open(target_path, "wb") as image_file: - for chunk in self.clients.glance().images.data(image.id): - image_file.write(chunk) - else: - LOG.debug("Downloading image from %s to %s." - % (conf.CONF.openstack.img_url, target_path)) - try: - response = requests.get(conf.CONF.openstack.img_url, - stream=True) - except requests.ConnectionError as err: - msg = ("Failed to download image. Possibly there is no " - "connection to Internet. Error: %s." - % (str(err) or "unknown")) - raise exceptions.RallyException(msg) - - if response.status_code == 200: - with open(target_path, "wb") as image_file: - for chunk in response.iter_content(chunk_size=1024): - if chunk: # filter out keep-alive new chunks - image_file.write(chunk) - image_file.flush() - else: - if response.status_code == 404: - msg = "Failed to download image. Image was not found." - else: - msg = ("Failed to download image. HTTP error code %d." 
- % response.status_code) - raise exceptions.RallyException(msg) - - LOG.debug("The image has been successfully downloaded!") - - def _download_image(self): - image_path = os.path.join(self.data_dir, self.image_name) - if os.path.isfile(image_path): - LOG.debug("Image is already downloaded to %s." % image_path) - return - - if conf.CONF.openstack.img_name_regex: - image = self._discover_image() - if image: - return self._download_image_from_source(image_path, image) - - self._download_image_from_source(image_path) - - def _discover_or_create_image(self): - if conf.CONF.openstack.img_name_regex: - image_obj = self._discover_image() - if image_obj: - LOG.debug("Using image '%s' (ID = %s) for the tests." - % (image_obj.name, image_obj.id)) - return image_obj - - params = { - "image_name": self.generate_random_name(), - "disk_format": conf.CONF.openstack.img_disk_format, - "container_format": conf.CONF.openstack.img_container_format, - "image_location": os.path.join(self.data_dir, self.image_name), - "visibility": "public" - } - LOG.debug("Creating image '%s'." % params["image_name"]) - image_service = image.Image(self.clients) - image_obj = image_service.create_image(**params) - LOG.debug("Image '%s' (ID = %s) has been successfully created!" - % (image_obj.name, image_obj.id)) - self._created_images.append(image_obj) - - return image_obj - - def _discover_or_create_flavor(self, flv_ram): - novaclient = self.clients.nova() - - LOG.debug("Trying to discover a flavor with the following " - "properties: RAM = %dMB, VCPUs = 1, disk = 0GB." % flv_ram) - for flavor in novaclient.flavors.list(): - if (flavor.ram == flv_ram and - flavor.vcpus == 1 and flavor.disk == 0): - LOG.debug("The following flavor discovered: '{0}'. " - "Using flavor '{0}' (ID = {1}) for the tests." - .format(flavor.name, flavor.id)) - return flavor - - LOG.debug("There is no flavor with the mentioned properties.") - - params = { - "name": self.generate_random_name(), - "ram": flv_ram, - "vcpus": 1, - "disk": 0 - } - LOG.debug("Creating flavor '%s' with the following properties: RAM " - "= %dMB, VCPUs = 1, disk = 0GB." % (params["name"], flv_ram)) - flavor = novaclient.flavors.create(**params) - LOG.debug("Flavor '%s' (ID = %s) has been successfully created!" - % (flavor.name, flavor.id)) - self._created_flavors.append(flavor) - - return flavor - - def _create_network_resources(self): - neutron_wrapper = network.NeutronWrapper(self.clients, self) - tenant_id = self.clients.keystone.auth_ref.project_id - LOG.debug("Creating network resources: network, subnet, router.") - net = neutron_wrapper.create_network( - tenant_id, subnets_num=1, add_router=True, - network_create_args={"shared": True}) - LOG.debug("Network resources have been successfully created!") - self._created_networks.append(net) - - return net - - def _cleanup_tempest_roles(self): - keystoneclient = self.clients.keystone() - for role in self._created_roles: - LOG.debug("Deleting role '%s'." % role.name) - keystoneclient.roles.delete(role.id) - LOG.debug("Role '%s' has been deleted." % role.name) - - def _cleanup_images(self): - image_service = image.Image(self.clients) - for image_obj in self._created_images: - LOG.debug("Deleting image '%s'." % image_obj.name) - self.clients.glance().images.delete(image_obj.id) - task_utils.wait_for_status( - image_obj, ["deleted", "pending_delete"], - check_deletion=True, - update_resource=image_service.get_image, - timeout=conf.CONF.openstack.glance_image_delete_timeout, - check_interval=conf.CONF.openstack. 
- glance_image_delete_poll_interval) - LOG.debug("Image '%s' has been deleted." % image_obj.name) - self._remove_opt_value_from_config("compute", image_obj.id) - - def _cleanup_flavors(self): - novaclient = self.clients.nova() - for flavor in self._created_flavors: - LOG.debug("Deleting flavor '%s'." % flavor.name) - novaclient.flavors.delete(flavor.id) - LOG.debug("Flavor '%s' has been deleted." % flavor.name) - self._remove_opt_value_from_config("compute", flavor.id) - self._remove_opt_value_from_config("orchestration", flavor.id) - - def _cleanup_network_resources(self): - neutron_wrapper = network.NeutronWrapper(self.clients, self) - for net in self._created_networks: - LOG.debug("Deleting network resources: router, subnet, network.") - neutron_wrapper.delete_network(net) - self._remove_opt_value_from_config("compute", net["name"]) - LOG.debug("Network resources have been deleted.") - - def _remove_opt_value_from_config(self, section, opt_value): - for option, value in self.conf.items(section): - if opt_value == value: - LOG.debug("Removing value '%s' of option '%s' " - "from Tempest config file." % (opt_value, option)) - self.conf.set(section, option, "") - LOG.debug("Value '%s' has been removed." % opt_value) diff --git a/rally/plugins/openstack/verification/tempest/manager.py b/rally/plugins/openstack/verification/tempest/manager.py deleted file mode 100644 index 761a9cb18f..0000000000 --- a/rally/plugins/openstack/verification/tempest/manager.py +++ /dev/null @@ -1,215 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import re -import shutil -import subprocess - -from rally.common import yamlutils as yaml -from rally import exceptions -from rally.plugins.common.verification import testr -from rally.plugins.openstack.verification.tempest import config -from rally.plugins.openstack.verification.tempest import consts -from rally.verification import manager -from rally.verification import utils - - -AVAILABLE_SETS = (list(consts.TempestTestSets) + - list(consts.TempestApiTestSets) + - list(consts.TempestScenarioTestSets)) - - -@manager.configure(name="tempest", platform="openstack", - default_repo="https://git.openstack.org/openstack/tempest", - context={"tempest": {}, "testr": {}}) -class TempestManager(testr.TestrLauncher): - """Tempest verifier. - - **Description**: - - Quote from official documentation: - - This is a set of integration tests to be run against a live OpenStack - cluster. Tempest has batteries of tests for OpenStack API validation, - Scenarios, and other specific tests useful in validating an OpenStack - deployment. 
- - Rally supports features listed below: - - * *cloning Tempest*: repository and version can be specified - * *installation*: system-wide with checking existence of required - packages or in virtual environment - * *configuration*: options are discovered via OpenStack API, but you can - override them if you need - * *running*: pre-creating all required resources(i.e images, tenants, - etc), prepare arguments, launching Tempest, live-progress output - * *results*: all verifications are stored in db, you can built reports, - compare verification at whatever you want time. - - Appeared in Rally 0.8.0 *(actually, it appeared long time ago with first - revision of Verification Component, but 0.8.0 is mentioned since it is - first release after Verification Component redesign)* - """ - - RUN_ARGS = {"set": "Name of predefined set of tests. Known names: %s" - % ", ".join(AVAILABLE_SETS)} - - @property - def run_environ(self): - env = super(TempestManager, self).run_environ - env["TEMPEST_CONFIG_DIR"] = os.path.dirname(self.configfile) - env["TEMPEST_CONFIG"] = os.path.basename(self.configfile) - # TODO(andreykurilin): move it to Testr base class - env["OS_TEST_PATH"] = os.path.join(self.repo_dir, - "tempest/test_discover") - return env - - @property - def configfile(self): - return os.path.join(self.home_dir, "tempest.conf") - - def validate_args(self, args): - """Validate given arguments.""" - super(TempestManager, self).validate_args(args) - - if args.get("pattern"): - pattern = args["pattern"].split("=", 1) - if len(pattern) == 1: - pass # it is just a regex - elif pattern[0] == "set": - if pattern[1] not in AVAILABLE_SETS: - raise exceptions.ValidationError( - "Test set '%s' not found in available " - "Tempest test sets. Available sets are '%s'." - % (pattern[1], "', '".join(AVAILABLE_SETS))) - else: - raise exceptions.ValidationError( - "'pattern' argument should be a regexp or set name " - "(format: 'tempest.api.identity.v3', 'set=smoke').") - - def configure(self, extra_options=None): - """Configure Tempest.""" - utils.create_dir(self.home_dir) - tcm = config.TempestConfigfileManager(self.verifier.deployment) - return tcm.create(self.configfile, extra_options) - - def is_configured(self): - """Check whether Tempest is configured or not.""" - return os.path.exists(self.configfile) - - def get_configuration(self): - """Get Tempest configuration.""" - with open(self.configfile) as f: - return f.read() - - def extend_configuration(self, extra_options): - """Extend Tempest configuration with extra options.""" - return utils.extend_configfile(extra_options, self.configfile) - - def override_configuration(self, new_configuration): - """Override Tempest configuration by new configuration.""" - with open(self.configfile, "w") as f: - f.write(new_configuration) - - def install_extension(self, source, version=None, extra_settings=None): - """Install a Tempest plugin.""" - if extra_settings: - raise NotImplementedError( - "'%s' verifiers don't support extra installation settings " - "for extensions." % self.get_name()) - version = version or "master" - egg = re.sub("\.git$", "", os.path.basename(source.strip("/"))) - full_source = "git+{0}@{1}#egg={2}".format(source, version, egg) - # NOTE(ylobankov): Use 'develop mode' installation to provide an - # ability to advanced users to change tests or - # develop new ones in verifier repo on the fly. 
- cmd = ["pip", "install", - "--src", os.path.join(self.base_dir, "extensions"), - "-e", full_source] - if self.verifier.system_wide: - cmd.insert(2, "--no-deps") - utils.check_output(cmd, cwd=self.base_dir, env=self.environ) - - # Very often Tempest plugins are inside projects and requirements - # for plugins are listed in the test-requirements.txt file. - test_reqs_path = os.path.join(self.base_dir, "extensions", - egg, "test-requirements.txt") - if os.path.exists(test_reqs_path): - if not self.verifier.system_wide: - utils.check_output(["pip", "install", "-r", test_reqs_path], - cwd=self.base_dir, env=self.environ) - else: - self.check_system_wide(reqs_file_path=test_reqs_path) - - def list_extensions(self): - """List all installed Tempest plugins.""" - # TODO(andreykurilin): find a better way to list tempest plugins - cmd = ("from tempest.test_discover import plugins; " - "plugins_manager = plugins.TempestTestPluginManager(); " - "plugins_map = plugins_manager.get_plugin_load_tests_tuple(); " - "plugins_list = [" - " {'name': p.name, " - " 'entry_point': p.entry_point_target, " - " 'location': plugins_map[p.name][1]} " - " for p in plugins_manager.ext_plugins.extensions]; " - "print(plugins_list)") - try: - output = utils.check_output(["python", "-c", cmd], - cwd=self.base_dir, env=self.environ, - debug_output=False).strip() - except subprocess.CalledProcessError: - raise exceptions.RallyException( - "Cannot list installed Tempest plugins for verifier %s." % - self.verifier) - - return yaml.safe_load(output) - - def uninstall_extension(self, name): - """Uninstall a Tempest plugin.""" - for ext in self.list_extensions(): - if ext["name"] == name and os.path.exists(ext["location"]): - shutil.rmtree(ext["location"]) - break - else: - raise exceptions.RallyException( - "There is no Tempest plugin with name '%s'. " - "Are you sure that it was installed?" % name) - - def list_tests(self, pattern=""): - """List all Tempest tests.""" - if pattern: - pattern = self._transform_pattern(pattern) - return super(TempestManager, self).list_tests(pattern) - - def prepare_run_args(self, run_args): - """Prepare 'run_args' for testr context.""" - if run_args.get("pattern"): - run_args["pattern"] = self._transform_pattern(run_args["pattern"]) - return run_args - - @staticmethod - def _transform_pattern(pattern): - """Transform pattern into Tempest-specific pattern.""" - parsed_pattern = pattern.split("=", 1) - if len(parsed_pattern) == 2: - if parsed_pattern[0] == "set": - if parsed_pattern[1] in consts.TempestTestSets: - return "smoke" if parsed_pattern[1] == "smoke" else "" - elif parsed_pattern[1] in consts.TempestApiTestSets: - return "tempest.api.%s" % parsed_pattern[1] - else: - return "tempest.%s" % parsed_pattern[1] - - return pattern # it is just a regex diff --git a/rally/plugins/openstack/wrappers/__init__.py b/rally/plugins/openstack/wrappers/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/openstack/wrappers/cinder.py b/rally/plugins/openstack/wrappers/cinder.py deleted file mode 100644 index 483885f93c..0000000000 --- a/rally/plugins/openstack/wrappers/cinder.py +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc - -from rally.common import logging -from rally import exceptions - -import six - -LOG = logging.getLogger(__name__) - - -@six.add_metaclass(abc.ABCMeta) -class CinderWrapper(object): - def __init__(self, client, owner): - self.owner = owner - self.client = client - - @abc.abstractmethod - def create_volume(self, volume): - """Creates new volume.""" - - @abc.abstractmethod - def update_volume(self, volume): - """Updates name and description for this volume.""" - - @abc.abstractmethod - def create_snapshot(self, volume_id): - """Creates a volume snapshot.""" - - -class CinderV1Wrapper(CinderWrapper): - def create_volume(self, size, **kwargs): - kwargs["display_name"] = self.owner.generate_random_name() - volume = self.client.volumes.create(size, **kwargs) - return volume - - def update_volume(self, volume, **update_args): - update_args["display_name"] = self.owner.generate_random_name() - update_args["display_description"] = ( - update_args.get("display_description")) - self.client.volumes.update(volume, **update_args) - - def create_snapshot(self, volume_id, **kwargs): - kwargs["display_name"] = self.owner.generate_random_name() - snapshot = self.client.volume_snapshots.create(volume_id, **kwargs) - return snapshot - - -class CinderV2Wrapper(CinderWrapper): - def create_volume(self, size, **kwargs): - kwargs["name"] = self.owner.generate_random_name() - - volume = self.client.volumes.create(size, **kwargs) - return volume - - def update_volume(self, volume, **update_args): - update_args["name"] = self.owner.generate_random_name() - update_args["description"] = update_args.get("description") - self.client.volumes.update(volume, **update_args) - - def create_snapshot(self, volume_id, **kwargs): - kwargs["name"] = self.owner.generate_random_name() - snapshot = self.client.volume_snapshots.create(volume_id, **kwargs) - return snapshot - - -def wrap(client, owner): - """Returns cinderclient wrapper based on cinder client version.""" - LOG.warning("Method wrap from %s and whole Cinder wrappers are " - "deprecated since Rally 0.10.0 and will be removed soon. Use " - "rally.plugins.openstack.services.storage.block.BlockStorage " - "instead." % __file__) - version = client.choose_version() - if version == "1": - return CinderV1Wrapper(client(), owner) - elif version == "2": - return CinderV2Wrapper(client(), owner) - else: - msg = "This version of API %s could not be identified." % version - LOG.warning(msg) - raise exceptions.InvalidArgumentsException(msg) diff --git a/rally/plugins/openstack/wrappers/glance.py b/rally/plugins/openstack/wrappers/glance.py deleted file mode 100644 index b0c70533c3..0000000000 --- a/rally/plugins/openstack/wrappers/glance.py +++ /dev/null @@ -1,211 +0,0 @@ -# Copyright 2016: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc -import os -import time - -from glanceclient import exc as glance_exc -import requests -import six - -from rally.common import cfg -from rally.common import logging -from rally.common import utils as rutils -from rally import exceptions -from rally.task import utils - - -LOG = logging.getLogger(__name__) - - -CONF = cfg.CONF - - -@six.add_metaclass(abc.ABCMeta) -class GlanceWrapper(object): - def __init__(self, client, owner): - self.owner = owner - self.client = client - - def get_image(self, image): - """Gets image. - - This serves to fetch the latest data on the image for the - various wait_for_*() functions. - Must raise rally.exceptions.GetResourceNotFound if the - resource is not found or deleted. - """ - # NOTE(stpierre): This function actually has a single - # implementation that works for both Glance v1 and Glance v2, - # but since we need to use this function in both wrappers, it - # gets implemented here. - try: - return self.client.images.get(image.id) - except glance_exc.HTTPNotFound: - raise exceptions.GetResourceNotFound(resource=image) - - @abc.abstractmethod - def create_image(self, container_format, image_location, disk_format): - """Creates new image. - - Accepts all Glance v2 parameters. - """ - - @abc.abstractmethod - def set_visibility(self, image, visibility="public"): - """Set an existing image to public or private.""" - - @abc.abstractmethod - def list_images(self, **filters): - """List images. - - Accepts all Glance v2 filters. - """ - - -class GlanceV1Wrapper(GlanceWrapper): - def create_image(self, container_format, image_location, - disk_format, **kwargs): - kw = { - "container_format": container_format, - "disk_format": disk_format, - } - kw.update(kwargs) - if "name" not in kw: - kw["name"] = self.owner.generate_random_name() - if "visibility" in kw: - kw["is_public"] = kw.pop("visibility") == "public" - - image_location = os.path.expanduser(image_location) - - try: - if os.path.isfile(image_location): - kw["data"] = open(image_location) - else: - kw["copy_from"] = image_location - - image = self.client.images.create(**kw) - - rutils.interruptable_sleep(CONF.openstack. - glance_image_create_prepoll_delay) - - image = utils.wait_for_status( - image, ["active"], - update_resource=self.get_image, - timeout=CONF.openstack.glance_image_create_timeout, - check_interval=CONF.openstack. - glance_image_create_poll_interval) - finally: - if "data" in kw: - kw["data"].close() - - return image - - def set_visibility(self, image, visibility="public"): - self.client.images.update(image.id, is_public=(visibility == "public")) - - def list_images(self, **filters): - kwargs = {"filters": filters} - if "owner" in filters: - # NOTE(stpierre): in glance v1, "owner" is not a filter, - # so we need to handle it separately. - kwargs["owner"] = kwargs["filters"].pop("owner") - visibility = kwargs["filters"].pop("visibility", None) - images = self.client.images.list(**kwargs) - # NOTE(stpierre): Glance v1 isn't smart enough to filter on - # public/private images, so we have to do it manually. 
- if visibility is not None: - is_public = visibility == "public" - return [i for i in images if i.is_public is is_public] - return images - - -class GlanceV2Wrapper(GlanceWrapper): - def create_image(self, container_format, image_location, - disk_format, **kwargs): - kw = { - "container_format": container_format, - "disk_format": disk_format, - } - kw.update(kwargs) - if "name" not in kw: - kw["name"] = self.owner.generate_random_name() - if "is_public" in kw: - LOG.warning("is_public is not supported by Glance v2, and is " - "deprecated in Rally v0.8.0") - kw["visibility"] = "public" if kw.pop("is_public") else "private" - - image_location = os.path.expanduser(image_location) - - image = self.client.images.create(**kw) - - rutils.interruptable_sleep(CONF.openstack. - glance_image_create_prepoll_delay) - - start = time.time() - image = utils.wait_for_status( - image, ["queued"], - update_resource=self.get_image, - timeout=CONF.openstack.glance_image_create_timeout, - check_interval=CONF.openstack. - glance_image_create_poll_interval) - timeout = time.time() - start - - image_data = None - response = None - try: - if os.path.isfile(image_location): - image_data = open(image_location) - else: - response = requests.get(image_location, stream=True) - image_data = response.raw - self.client.images.upload(image.id, image_data) - finally: - if image_data is not None: - image_data.close() - if response is not None: - response.close() - - return utils.wait_for_status( - image, ["active"], - update_resource=self.get_image, - timeout=timeout, - check_interval=CONF.openstack. - glance_image_create_poll_interval) - - def set_visibility(self, image, visibility="public"): - self.client.images.update(image.id, visibility=visibility) - - def list_images(self, **filters): - return self.client.images.list(filters=filters) - - -def wrap(client, owner): - """Returns glanceclient wrapper based on glance client version.""" - LOG.warning("Method wrap from %s and whole Glance wrappers are " - "deprecated since Rally 0.10.0 and will be removed soon. Use " - "rally.plugins.openstack.services.image.image.Image " - "instead." % __file__) - - version = client.choose_version() - if version == "1": - return GlanceV1Wrapper(client(), owner) - elif version == "2": - return GlanceV2Wrapper(client(), owner) - else: - msg = "Version %s of the glance API could not be identified." % version - LOG.warning(msg) - raise exceptions.InvalidArgumentsException(msg) diff --git a/rally/plugins/openstack/wrappers/keystone.py b/rally/plugins/openstack/wrappers/keystone.py deleted file mode 100644 index 0114922ce7..0000000000 --- a/rally/plugins/openstack/wrappers/keystone.py +++ /dev/null @@ -1,273 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import abc -import collections - -from keystoneclient import exceptions -import six - -from rally.common import logging - - -LOG = logging.getLogger(__name__) - -Project = collections.namedtuple("Project", ["id", "name", "domain_id"]) -User = collections.namedtuple("User", - ["id", "name", "project_id", "domain_id"]) -Service = collections.namedtuple("Service", ["id", "name"]) -Role = collections.namedtuple("Role", ["id", "name"]) - - -@six.add_metaclass(abc.ABCMeta) -class KeystoneWrapper(object): - def __init__(self, client): - self.client = client - - LOG.warning( - "Class %s is deprecated since Rally 0.8.0 and will be removed " - "soon. Use " - "rally.plugins.openstack.services.identity.identity.Identity " - "instead." % self.__class__) - - def __getattr__(self, attr_name): - return getattr(self.client, attr_name) - - @abc.abstractmethod - def create_project(self, project_name, domain_name="Default"): - """Creates new project/tenant and return project object. - - :param project_name: Name of project to be created. - :param domain_name: Name or id of domain where to create project, for - implementations that don't support domains this - argument must be None or 'Default'. - """ - - @abc.abstractmethod - def delete_project(self, project_id): - """Deletes project.""" - - @abc.abstractmethod - def create_user(self, username, password, email=None, project_id=None, - domain_name="Default", default_role="member"): - """Create user. - - :param username: name of user - :param password: user password - :param project: user's default project - :param domain_name: Name or id of domain where to create project, for - implementations that don't support domains this - argument must be None or 'Default'. - :param default_role: user's default role - """ - - @abc.abstractmethod - def delete_user(self, user_id): - """Deletes user.""" - - @abc.abstractmethod - def list_users(self): - """List all users.""" - - @abc.abstractmethod - def list_projects(self): - """List all projects/tenants.""" - - def delete_service(self, service_id): - """Deletes service.""" - self.client.services.delete(service_id) - - def list_services(self): - """List all services.""" - return map(KeystoneWrapper._wrap_service, self.client.services.list()) - - def create_role(self, name, **kwargs): - """create a role. 
- - :param name: name of role - :param kwargs: Optional additional arguments for roles creation - """ - - def delete_role(self, role_id): - """Deletes role.""" - self.client.roles.delete(role_id) - - def list_roles(self): - """List all roles.""" - return map(KeystoneWrapper._wrap_role, self.client.roles.list()) - - @abc.abstractmethod - def add_role(self, role_id, user_id, project_id): - """Assign role to user.""" - - @abc.abstractmethod - def remove_role(self, role_id, user_id, project_id): - """Remove role from user.""" - - @staticmethod - def _wrap_service(service): - return Service(id=service.id, name=service.name) - - @staticmethod - def _wrap_role(role): - return Role(id=role.id, name=role.name) - - -class KeystoneV2Wrapper(KeystoneWrapper): - def _check_domain(self, domain_name): - if domain_name.lower() != "default": - raise NotImplementedError("Domain functionality not implemented " - "in Keystone v2") - - @staticmethod - def _wrap_v2_tenant(tenant): - return Project(id=tenant.id, name=tenant.name, domain_id="default") - - @staticmethod - def _wrap_v2_role(role): - return Role(id=role.id, name=role.name) - - @staticmethod - def _wrap_v2_user(user): - return User(id=user.id, name=user.name, - project_id=getattr(user, "tenantId", None), - domain_id="default") - - def create_project(self, project_name, domain_name="Default"): - self._check_domain(domain_name) - tenant = self.client.tenants.create(project_name) - return KeystoneV2Wrapper._wrap_v2_tenant(tenant) - - def delete_project(self, project_id): - self.client.tenants.delete(project_id) - - def create_user(self, username, password, email=None, project_id=None, - domain_name="Default", default_role="member"): - # NOTE(liuyulong): For v2 wrapper the `default_role` here is not used. - self._check_domain(domain_name) - user = self.client.users.create(username, password, email, project_id) - return KeystoneV2Wrapper._wrap_v2_user(user) - - def delete_user(self, user_id): - self.client.users.delete(user_id) - - def list_users(self): - return map(KeystoneV2Wrapper._wrap_v2_user, self.client.users.list()) - - def list_projects(self): - return map(KeystoneV2Wrapper._wrap_v2_tenant, - self.client.tenants.list()) - - def create_role(self, name): - role = self.client.roles.create(name) - return KeystoneV2Wrapper._wrap_v2_role(role) - - def add_role(self, role_id, user_id, project_id): - self.client.roles.add_user_role(user_id, role_id, tenant=project_id) - - def remove_role(self, role_id, user_id, project_id): - self.client.roles.remove_user_role(user_id, role_id, tenant=project_id) - - -class KeystoneV3Wrapper(KeystoneWrapper): - def _get_domain_id(self, domain_name_or_id): - try: - # First try to find domain by ID - return self.client.domains.get(domain_name_or_id).id - except exceptions.NotFound: - # Domain not found by ID, try to find it by name - domains = self.client.domains.list(name=domain_name_or_id) - if domains: - return domains[0].id - # Domain not found by name, raise original NotFound exception - raise - - @staticmethod - def _wrap_v3_project(project): - return Project(id=project.id, name=project.name, - domain_id=project.domain_id) - - @staticmethod - def _wrap_v3_role(role): - return Role(id=role.id, name=role.name) - - @staticmethod - def _wrap_v3_user(user): - # When user has default_project_id that is None user.default_project_id - # will raise AttributeError - project_id = getattr(user, "default_project_id", None) - return User(id=user.id, name=user.name, project_id=project_id, - domain_id=user.domain_id) - - def 
create_project(self, project_name, domain_name="Default"): - domain_id = self._get_domain_id(domain_name) - project = self.client.projects.create( - name=project_name, domain=domain_id) - return KeystoneV3Wrapper._wrap_v3_project(project) - - def delete_project(self, project_id): - self.client.projects.delete(project_id) - - def create_user(self, username, password, email=None, project_id=None, - domain_name="Default", default_role="member"): - domain_id = self._get_domain_id(domain_name) - user = self.client.users.create(name=username, password=password, - default_project=project_id, - email=email, domain=domain_id) - for role in self.client.roles.list(): - if default_role in role.name.lower(): - self.client.roles.grant(role.id, user=user.id, - project=project_id) - break - else: - LOG.warning( - "Unable to set %s role to created user." % default_role) - return KeystoneV3Wrapper._wrap_v3_user(user) - - def delete_user(self, user_id): - self.client.users.delete(user_id) - - def list_users(self): - return map(KeystoneV3Wrapper._wrap_v3_user, self.client.users.list()) - - def list_projects(self): - return map(KeystoneV3Wrapper._wrap_v3_project, - self.client.projects.list()) - - def create_role(self, name, domain, **kwargs): - role = self.client.roles.create(name, domain=domain, **kwargs) - return KeystoneV3Wrapper._wrap_v3_role(role) - - def add_role(self, role_id, user_id, project_id): - self.client.roles.grant(role_id, user=user_id, project=project_id) - - def remove_role(self, role_id, user_id, project_id): - self.client.roles.revoke(role_id, user=user_id, project=project_id) - - -def wrap(client): - """Returns keystone wrapper based on keystone client version.""" - LOG.warning("Method wrap from %s and whole Keystone wrappers are " - "deprecated since Rally 0.8.0 and will be removed soon. Use " - "rally.plugins.openstack.services.identity.identity.Identity " - "instead." % __file__) - - if client.version == "v2.0": - return KeystoneV2Wrapper(client) - elif client.version == "v3": - return KeystoneV3Wrapper(client) - else: - raise NotImplementedError( - "Wrapper for version %s is not implemented." % client.version) diff --git a/rally/plugins/openstack/wrappers/network.py b/rally/plugins/openstack/wrappers/network.py deleted file mode 100644 index 5ff09d536d..0000000000 --- a/rally/plugins/openstack/wrappers/network.py +++ /dev/null @@ -1,432 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc -import itertools - -import netaddr -import six - -from rally.common import cfg -from rally.common import logging -from rally.common import utils -from rally import consts -from rally import exceptions - -from neutronclient.common import exceptions as neutron_exceptions - - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - - -cidr_incr = utils.RAMInt() -ipv6_cidr_incr = utils.RAMInt() - - -def generate_cidr(start_cidr="10.2.0.0/24"): - """Generate next CIDR for network or subnet, without IP overlapping. 
- - This is process and thread safe, because `cidr_incr' points to - value stored directly in RAM. This guarantees that CIDRs will be - serial and unique even under hard multiprocessing/threading load. - - :param start_cidr: start CIDR str - :returns: next available CIDR str - """ - if netaddr.IPNetwork(start_cidr).version == 4: - cidr = str(netaddr.IPNetwork(start_cidr).next(next(cidr_incr))) - else: - cidr = str(netaddr.IPNetwork(start_cidr).next(next(ipv6_cidr_incr))) - LOG.debug("CIDR generated: %s" % cidr) - return cidr - - -class NetworkWrapperException(exceptions.RallyException): - error_code = 217 - msg_fmt = "%(message)s" - - -@six.add_metaclass(abc.ABCMeta) -class NetworkWrapper(object): - """Base class for network service implementations. - - We actually have two network services implementations, with different API: - NovaNetwork and Neutron. The idea is (at least to try) to use unified - service, which hides most differences and routines behind the scenes. - This allows to significantly re-use and simplify code. - """ - START_CIDR = "10.2.0.0/24" - START_IPV6_CIDR = "dead:beaf::/64" - SERVICE_IMPL = None - - def __init__(self, clients, owner, config=None): - """Returns available network wrapper instance. - - :param clients: rally.plugins.openstack.osclients.Clients instance - :param owner: The object that owns resources created by this - wrapper instance. It will be used to generate - random names, so must implement - rally.common.utils.RandomNameGeneratorMixin - :param config: The configuration of the network - wrapper. Currently only two config options are - recognized, 'start_cidr' and 'start_ipv6_cidr'. - :returns: NetworkWrapper subclass instance - """ - if hasattr(clients, self.SERVICE_IMPL): - self.client = getattr(clients, self.SERVICE_IMPL)() - else: - self.client = clients(self.SERVICE_IMPL) - self.config = config or {} - self.owner = owner - self.start_cidr = self.config.get("start_cidr", self.START_CIDR) - self.start_ipv6_cidr = self.config.get( - "start_ipv6_cidr", self.START_IPV6_CIDR) - - @abc.abstractmethod - def create_network(self): - """Create network.""" - - @abc.abstractmethod - def delete_network(self): - """Delete network.""" - - @abc.abstractmethod - def list_networks(self): - """List networks.""" - - @abc.abstractmethod - def create_floating_ip(self): - """Create floating IP.""" - - @abc.abstractmethod - def delete_floating_ip(self): - """Delete floating IP.""" - - @abc.abstractmethod - def supports_extension(self): - """Checks whether a network extension is supported.""" - - -class NeutronWrapper(NetworkWrapper): - SERVICE_IMPL = consts.Service.NEUTRON - SUBNET_IP_VERSION = 4 - SUBNET_IPV6_VERSION = 6 - LB_METHOD = "ROUND_ROBIN" - LB_PROTOCOL = "HTTP" - - @property - def external_networks(self): - return self.client.list_networks(**{ - "router:external": True})["networks"] - - @property - def ext_gw_mode_enabled(self): - """Determine if the ext-gw-mode extension is enabled. - - Without this extension, we can't pass the enable_snat parameter. 
- """ - return any(e["alias"] == "ext-gw-mode" - for e in self.client.list_extensions()["extensions"]) - - def get_network(self, net_id=None, name=None): - net = None - try: - if net_id: - net = self.client.show_network(net_id)["network"] - else: - for net in self.client.list_networks(name=name)["networks"]: - break - return {"id": net["id"], - "name": net["name"], - "tenant_id": net["tenant_id"], - "status": net["status"], - "external": net["router:external"], - "subnets": net["subnets"], - "router_id": None} - except (TypeError, neutron_exceptions.NeutronClientException): - raise NetworkWrapperException( - "Network not found: %s" % (name or net_id)) - - def create_router(self, external=False, **kwargs): - """Create neutron router. - - :param external: bool, whether to set setup external_gateway_info - :param **kwargs: POST /v2.0/routers request options - :returns: neutron router dict - """ - kwargs["name"] = self.owner.generate_random_name() - - if external and "external_gateway_info" not in kwargs: - for net in self.external_networks: - kwargs["external_gateway_info"] = {"network_id": net["id"]} - if self.ext_gw_mode_enabled: - kwargs["external_gateway_info"]["enable_snat"] = True - return self.client.create_router({"router": kwargs})["router"] - - def create_v1_pool(self, tenant_id, subnet_id, **kwargs): - """Create LB Pool (v1). - - :param tenant_id: str, pool tenant id - :param subnet_id: str, neutron subnet-id - :param **kwargs: extra options - :returns: neutron lb-pool dict - """ - pool_args = { - "pool": { - "tenant_id": tenant_id, - "name": self.owner.generate_random_name(), - "subnet_id": subnet_id, - "lb_method": kwargs.get("lb_method", self.LB_METHOD), - "protocol": kwargs.get("protocol", self.LB_PROTOCOL) - } - } - return self.client.create_pool(pool_args) - - def _generate_cidr(self, ip_version=4): - # TODO(amaretskiy): Generate CIDRs unique for network, not cluster - return generate_cidr( - start_cidr=self.start_cidr if ip_version == 4 - else self.start_ipv6_cidr) - - def create_network(self, tenant_id, **kwargs): - """Create network. - - The following keyword arguments are accepted: - - * add_router: Deprecated, please use router_create_args instead. - Create an external router and add an interface to each - subnet created. Default: False - * subnets_num: Number of subnets to create per network. Default: 0 - * dualstack: Whether subnets should be of both IPv4 and IPv6 - * dns_nameservers: Nameservers for each subnet. Default: - 8.8.8.8, 8.8.4.4 - * network_create_args: Additional network creation arguments. - * router_create_args: Additional router creation arguments. - - :param tenant_id: str, tenant ID - :param kwargs: Additional options, left open-ended for compatbilitiy. - See above for recognized keyword args. 
- :returns: dict, network data - """ - network_args = {"network": kwargs.get("network_create_args", {})} - network_args["network"].update({ - "tenant_id": tenant_id, - "name": self.owner.generate_random_name()}) - network = self.client.create_network(network_args)["network"] - - router = None - router_args = dict(kwargs.get("router_create_args", {})) - add_router = kwargs.get("add_router", False) - if router_args or add_router: - router_args["external"] = ( - router_args.get("external", False) or add_router) - router_args["tenant_id"] = tenant_id - router = self.create_router(**router_args) - - dualstack = kwargs.get("dualstack", False) - - subnets = [] - subnets_num = kwargs.get("subnets_num", 0) - ip_versions = itertools.cycle( - [self.SUBNET_IP_VERSION, self.SUBNET_IPV6_VERSION] - if dualstack else [self.SUBNET_IP_VERSION]) - for i in range(subnets_num): - ip_version = next(ip_versions) - subnet_args = { - "subnet": { - "tenant_id": tenant_id, - "network_id": network["id"], - "name": self.owner.generate_random_name(), - "ip_version": ip_version, - "cidr": self._generate_cidr(ip_version), - "enable_dhcp": True, - "dns_nameservers": ( - kwargs.get("dns_nameservers", ["8.8.8.8", "8.8.4.4"]) - if ip_version == 4 - else kwargs.get("dns_nameservers", - ["dead:beaf::1", "dead:beaf::2"])) - } - } - subnet = self.client.create_subnet(subnet_args)["subnet"] - subnets.append(subnet["id"]) - - if router: - self.client.add_interface_router(router["id"], - {"subnet_id": subnet["id"]}) - - return {"id": network["id"], - "name": network["name"], - "status": network["status"], - "subnets": subnets, - "external": network.get("router:external", False), - "router_id": router and router["id"] or None, - "tenant_id": tenant_id} - - def delete_v1_pool(self, pool_id): - """Delete LB Pool (v1) - - :param pool_id: str, Lb-Pool-id - """ - self.client.delete_pool(pool_id) - - def delete_network(self, network): - if self.supports_extension("dhcp_agent_scheduler")[0]: - net_dhcps = self.client.list_dhcp_agent_hosting_networks( - network["id"])["agents"] - for net_dhcp in net_dhcps: - self.client.remove_network_from_dhcp_agent(net_dhcp["id"], - network["id"]) - - if network["router_id"]: - self.client.remove_gateway_router(network["router_id"]) - - for port in self.client.list_ports(network_id=network["id"])["ports"]: - if port["device_owner"] in ( - "network:router_interface", - "network:router_interface_distributed", - "network:ha_router_replicated_interface", - "network:router_gateway"): - try: - self.client.remove_interface_router( - port["device_id"], {"port_id": port["id"]}) - except (neutron_exceptions.BadRequest, - neutron_exceptions.NotFound): - # Some neutron plugins don't use router as - # the device ID. Also, some plugin doesn't allow - # to update the ha rotuer interface as there is - # an internal logic to update the interface/data model - # instead. - pass - else: - try: - self.client.delete_port(port["id"]) - except neutron_exceptions.PortNotFoundClient: - # port is auto-removed - pass - - for subnet in self.client.list_subnets( - network_id=network["id"])["subnets"]: - self._delete_subnet(subnet["id"]) - - response = self.client.delete_network(network["id"]) - - if network["router_id"]: - self.client.delete_router(network["router_id"]) - - return response - - def _delete_subnet(self, subnet_id): - self.client.delete_subnet(subnet_id) - - def list_networks(self): - return self.client.list_networks()["networks"] - - def create_port(self, network_id, **kwargs): - """Create neutron port. 
- - :param network_id: neutron network id - :param **kwargs: POST /v2.0/ports request options - :returns: neutron port dict - """ - kwargs["network_id"] = network_id - kwargs["name"] = self.owner.generate_random_name() - return self.client.create_port({"port": kwargs})["port"] - - def create_floating_ip(self, ext_network=None, - tenant_id=None, port_id=None, **kwargs): - """Create Neutron floating IP. - - :param ext_network: floating network name or dict - :param tenant_id: str tenant id - :param port_id: str port id - :param **kwargs: for compatibility, not used here - :returns: floating IP dict - """ - if not tenant_id: - raise ValueError("Missed tenant_id") - - if type(ext_network) is dict: - net_id = ext_network["id"] - elif ext_network: - ext_net = self.get_network(name=ext_network) - if not ext_net["external"]: - raise NetworkWrapperException("Network is not external: %s" - % ext_network) - net_id = ext_net["id"] - else: - ext_networks = self.external_networks - if not ext_networks: - raise NetworkWrapperException( - "Failed to allocate floating IP: " - "no external networks found") - net_id = ext_networks[0]["id"] - - kwargs = {"floatingip": {"floating_network_id": net_id, - "tenant_id": tenant_id}} - - if not CONF.openstack.pre_newton_neutron: - descr = self.owner.generate_random_name() - kwargs["floatingip"]["description"] = descr - - if port_id: - kwargs["floatingip"]["port_id"] = port_id - - fip = self.client.create_floatingip(kwargs)["floatingip"] - return {"id": fip["id"], "ip": fip["floating_ip_address"]} - - def delete_floating_ip(self, fip_id, **kwargs): - """Delete floating IP. - - :param fip_id: int floating IP id - :param **kwargs: for compatibility, not used here - """ - self.client.delete_floatingip(fip_id) - - def supports_extension(self, extension): - """Check whether a neutron extension is supported - - :param extension: str, neutron extension - :returns: result tuple - :rtype: (bool, string) - """ - extensions = self.client.list_extensions().get("extensions", []) - if any(ext.get("alias") == extension for ext in extensions): - return True, "" - - return False, "Neutron driver does not support %s" % extension - - -def wrap(clients, owner, config=None): - """Returns available network wrapper instance. - - :param clients: rally.plugins.openstack.osclients.Clients instance - :param owner: The object that owns resources created by this - wrapper instance. It will be used to generate random - names, so must implement - rally.common.utils.RandomNameGeneratorMixin - :param config: The configuration of the network wrapper. Currently - only one config option is recognized, 'start_cidr', - and only for Nova network. - :returns: NetworkWrapper subclass instance - """ - if hasattr(clients, "services"): - services = clients.services() - else: - services = clients("services") - - if consts.Service.NEUTRON in services.values(): - return NeutronWrapper(clients, owner, config=config) - LOG.warning("NovaNetworkWrapper is deprecated since 0.9.0") diff --git a/rally/plugins/workload/__init__.py b/rally/plugins/workload/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rally/plugins/workload/siege.py b/rally/plugins/workload/siege.py deleted file mode 100644 index 8b40235d87..0000000000 --- a/rally/plugins/workload/siege.py +++ /dev/null @@ -1,55 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json -import re -import subprocess -import sys -import tempfile - - -SIEGE_RE = re.compile(r"^(Throughput|Transaction rate):\s+(\d+\.\d+)\s+.*") - - -def get_instances(): - outputs = json.load(sys.stdin) - for output in outputs: - if output["output_key"] == "wp_nodes": - for node in output["output_value"].values(): - yield node["wordpress-network"][0] - - -def generate_urls_list(instances): - urls = tempfile.NamedTemporaryFile(delete=False) - with urls: - for inst in instances: - for i in range(1, 1000): - urls.write("http://%s/wordpress/index.php/%d/\n" % (inst, i)) - return urls.name - - -def run(): - instances = list(get_instances()) - urls = generate_urls_list(instances) - out = subprocess.check_output( - ["siege", "-q", "-t", "60S", "-b", "-f", urls], - stderr=subprocess.STDOUT) - for line in out.splitlines(): - m = SIEGE_RE.match(line) - if m: - sys.stdout.write("%s:%s\n" % m.groups()) - - -if __name__ == "__main__": - sys.exit(run()) diff --git a/rally/task/types.py b/rally/task/types.py index 61f4e332d4..2342572e1d 100644 --- a/rally/task/types.py +++ b/rally/task/types.py @@ -17,7 +17,6 @@ import abc import copy import operator import re -import traceback import six @@ -91,50 +90,8 @@ def preprocess(name, context, args): return processed_args -def _pre_process_method(self, resource_spec, config): - """pre_process to transform adapter. - - Adopts a call for a new style pre_process instance method if ResourceType - to old style(deprecated way) call to classmethod transform. - """ - if resource_spec is None: - # previously, such arguments were skipped - return - - from rally.plugins.openstack import osclients - - clients = None - if self._context.get("admin"): - clients = osclients.Clients(self._context["admin"]["credential"]) - elif self._context.get("users"): - clients = osclients.Clients(self._context["users"][0]["credential"]) - - return self.__class__.transform(clients=clients, - resource_config=resource_spec) - - -class _OldTypesCompatMeta(type): - - def __new__(mcs, name, parents, dct): - # check for old-style ResourceTypes - if "transform" in dct: - # check the case when plugin supports both old and new styles - if ("pre_process" not in dct - or dct["pre_process"] == ResourceType.pre_process): - dct["pre_process"] = _pre_process_method - - LOG.warning("ResourceType class %s implements an old " - "interface which is deprecated since Rally 0.12 " - "and which will be removed soon." 
% name) - - return super(_OldTypesCompatMeta, mcs).__new__(mcs, name, parents, dct) - - -_CombinedMeta = type("CombineMeta", (abc.ABCMeta, _OldTypesCompatMeta), {}) - - @plugin.base() -@six.add_metaclass(_CombinedMeta) +@six.add_metaclass(abc.ABCMeta) class ResourceType(plugin.Plugin): def __init__(self, context, cache=None): @@ -152,24 +109,6 @@ class ResourceType(plugin.Plugin): """ -class DeprecatedBehaviourMixin(object): - """A Mixin class which returns deprecated `transform` method.""" - - @classmethod - def transform(cls, clients, resource_config): - caller = traceback.format_stack(limit=2)[0] - LOG.warning("Calling method `transform` of %s is deprecated:\n%s" % - (cls.__name__, caller)) - if clients: - # it doesn't matter "permission" of the user. it will pick the - # first one - context = {"admin": {"credential": clients.credential}} - else: - context = {} - self = cls(context, cache={}) - return self.pre_process(resource_spec=resource_config, config={}) - - def obj_from_name(resource_config, resources, typename): """Return the resource whose name matches the pattern. diff --git a/rally/task/validation.py b/rally/task/validation.py old mode 100755 new mode 100644 index 3476b8081e..40e8e5dcec --- a/rally/task/validation.py +++ b/rally/task/validation.py @@ -14,206 +14,7 @@ # License for the specific language governing permissions and limitations # under the License. -import functools - -from rally.common import logging from rally.common import validation - -LOG = logging.getLogger(__name__) - # TODO(astudenov): remove after deprecating all old validators add = validation.add - - -class ValidationResult(object): - - def __init__(self, is_valid, msg="", etype=None, etraceback=None): - self.is_valid = is_valid - self.msg = msg - self.etype = etype - self.etraceback = etraceback - - def __str__(self): - if self.is_valid: - return "validation success" - if self.etype: - return ("---------- Exception in validator ----------\n" + - self.etraceback) - return self.msg - - -@validation.add("required_platform", platform="openstack", users=True) -@validation.configure(name="old_validator", platform="openstack") -class OldValidator(validation.Validator): - - class Deployment(object): - def __init__(self, ctx): - self.ctx = ctx - - def get_credentials_for(self, platform): - return {"admin": self.ctx["admin"]["credential"], - "users": [u["credential"] for u in self.ctx["users"]]} - - def __init__(self, fn, *args, **kwargs): - """Legacy validator for OpenStack scenarios - - :param fn: function that performs validation - """ - self.fn = fn - self.args = args - self.kwargs = kwargs - - def validate(self, context, config, plugin_cls, plugin_cfg): - users = context["users"] - - deployment = self.Deployment(context) - - if users: - users = [user["credential"].clients() for user in users] - for clients in users: - result = self._run_fn(config, deployment, clients) - if result and not result.is_valid: - self.fail(str(result)) - return - else: - result = self._run_fn(config, deployment) - if result and not result.is_valid: - self.fail(str(result)) - - def _run_fn(self, config, deployment, clients=None): - return self.fn(config, clients, deployment, *self.args, **self.kwargs) - - -def validator(fn): - """Decorator that constructs a scenario validator from given function. - - Decorated function should return ValidationResult on error. - - :param fn: function that performs validation - :returns: rally scenario validator - """ - - LOG.warning("The old validator mechanism is deprecated since Rally 0.10.0." 
- " Use plugin base for validators - " - "rally.common.validation.Validator (see rally.plugin.common." - "validators and rally.plugins.openstack.validators for " - "examples).") - - def wrap_given(*args, **kwargs): - """Dynamic validation decorator for scenario. - - :param args: the arguments of the decorator of the scenario - ex. @my_decorator("arg1"), then args = ("arg1",) - :param kwargs: the keyword arguments of the decorator of the scenario - ex. @my_decorator(kwarg1="kwarg1"), then kwargs = {"kwarg1": "kwarg1"} - """ - def wrap_scenario(scenario): - scenario._meta_setdefault("validators", []) - scenario._meta_get("validators").append( - ("old_validator", (fn, ) + args, kwargs)) - return scenario - - return wrap_scenario - - return wrap_given - - -# TODO(astudenov): remove deprecated validators in 1.0.0 - -def deprecated_validator(name, old_validator_name, rally_version): - def decorator(*args, **kwargs): - def wrapper(plugin): - plugin_name = plugin.get_name() - LOG.warning( - "Plugin '%s' uses validator 'rally.task.validation.%s' which " - "is deprecated in favor of '%s' (it should be used " - "via new decorator 'rally.common.validation.add') in " - "Rally v%s." - % (plugin_name, old_validator_name, name, rally_version)) - plugin._meta_setdefault("validators", []) - plugin._meta_get("validators").append((name, args, kwargs,)) - return plugin - return wrapper - return decorator - - -_deprecated_platform_validator = deprecated_validator( - "required_platform", "required_openstack", "0.10.0") - -required_openstack = functools.partial( - _deprecated_platform_validator, platform="openstack") - -number = deprecated_validator("number", "number", "0.10.0") - -image_exists = deprecated_validator("image_exists", "image_exists", "0.10.0") - -external_network_exists = deprecated_validator("external_network_exists", - "external_network_exists", - "0.10.0") - -required_neutron_extensions = deprecated_validator( - "required_neutron_extensions", "required_neutron_extensions", "0.10.0") - -image_valid_on_flavor = deprecated_validator("image_valid_on_flavor", - "image_valid_on_flavor", - "0.10.0") - -required_clients = deprecated_validator("required_clients", "required_clients", - "0.10.0") - -required_services = deprecated_validator("required_services", - "required_services", "0.10.0") - -validate_heat_template = deprecated_validator("validate_heat_template", - "validate_heat_template", - "0.10.0") - -restricted_parameters = deprecated_validator("restricted_parameters", - "restricted_parameters", - "0.10.0") - -required_cinder_services = deprecated_validator("required_cinder_services", - "required_cinder_services", - "0.10.0") - -required_api_versions = deprecated_validator("required_api_versions", - "required_api_versions", - "0.10.0") - -required_contexts = deprecated_validator("required_contexts", - "required_contexts", - "0.10.0") - -required_param_or_context = deprecated_validator("required_param_or_context", - "required_param_or_context", - "0.10.0") - -volume_type_exists = deprecated_validator("volume_type_exists", - "volume_type_exists", - "0.10.0") - -file_exists = deprecated_validator("file_exists", "file_exists", "0.10.0") - -valid_command = deprecated_validator("valid_command", "valid_command", - "0.10.0") - -flavor_exists = deprecated_validator("flavor_exists", "flavor_exists", - "0.10.0") - -_deprecated_share_proto = deprecated_validator( - "enum", "validate_share_proto", "0.10.0") - -validate_share_proto = functools.partial( - _deprecated_share_proto, - param_name="share_proto", - 
values=["NFS", "CIFS", "GLUSTERFS", "HDFS", "CEPHFS"], - case_insensitive=True) - -_workbook_contains_workflow = deprecated_validator( - "workbook_contains_workflow", "workbook_contains_workflow", "0.10.0") - - -def workbook_contains_workflow(workbook, workflow_name): - return _workbook_contains_workflow(workbook_param=workbook, - workflow_param=workflow_name) diff --git a/requirements.txt b/requirements.txt index 0f55c5ad43..e1047eb845 100644 --- a/requirements.txt +++ b/requirements.txt @@ -23,31 +23,3 @@ requests>=2.14.2 # Apache License, Version SQLAlchemy>=1.0.10,!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8 # MIT six>=1.10.0 # MIT virtualenv>=14.0.6 # MIT - -# OpenStack related -boto>=2.32.1 # MIT -gnocchiclient>=3.3.1 # Apache Software License -keystoneauth1>=3.3.0 # Apache Software License -os-faults>=0.1.15 # Apache Software License -osprofiler>=1.4.0 # Apache Software License -python-ceilometerclient>=2.5.0 # Apache Software License -python-cinderclient>=3.3.0 # Apache Software License -python-designateclient>=2.7.0 # Apache License, Version 2.0 -python-heatclient>=1.10.0 # Apache Software License -python-glanceclient>=2.8.0 # Apache License, Version 2.0 -python-ironicclient>=2.2.0 # Apache Software License -python-keystoneclient>=3.8.0 # Apache Software License -python-magnumclient>=2.1.0 # Apache Software License -python-manilaclient>=1.16.0 # Apache Software License -python-mistralclient>=3.1.0 # Apache Software License -python-muranoclient>=0.8.2 # Apache License, Version 2.0 -python-monascaclient>=1.7.0 # Apache Software License -python-neutronclient>=6.3.0 # Apache Software License -python-novaclient>=9.1.0 # Apache License, Version 2.0 -python-saharaclient>=1.4.0 # Apache License, Version 2.0 -python-senlinclient>=1.1.0 # Apache Software License -python-swiftclient>=3.2.0 # Apache Software License -python-troveclient>=2.2.0 # Apache Software License -python-watcherclient>=1.1.0 # Apache Software License -python-zaqarclient>=1.0.0 # Apache Software License -kubernetes>1.0.0 # Apache License Version 2.0 diff --git a/samples/deployments/README.rst b/samples/deployments/README.rst deleted file mode 100644 index 7571d2b250..0000000000 --- a/samples/deployments/README.rst +++ /dev/null @@ -1,40 +0,0 @@ -Rally Deployments -================= - -Rally needs to have information about OpenStack Cloud before you actually -can run any tests against it. - -You need create a deployment input file and run use command bellow: - -.. code-block:: - - rally deployment create --file --name my_cloud - -Below you can find samples of supported configurations. - -existing.json -------------- - -Register existing OpenStack cluster. - -existing-keystone-v3.json -------------------------- - -Register existing OpenStack cluster that uses Keystone v3. - -existing-with-predefined-users.json --------------------------------------- - -If you are using read-only backend in Keystone like LDAP, AD then -you need this sample. If you don't specify "users" rally will use already -existing users that you provide. - - -existing-with-given-endpoint.json ---------------------------------- - -Register existing OpenStack cluster, with parameter "endpoint" specified -to explicitly set keystone management_url. Use this parameter if -keystone fails to setup management_url correctly. 
-For example, this parameter must be specified for FUEL cluster -and has value "http://:35357/v2.0/" diff --git a/samples/deployments/existing-keystone-v3-osprofiler.json b/samples/deployments/existing-keystone-v3-osprofiler.json deleted file mode 100644 index e695148e4c..0000000000 --- a/samples/deployments/existing-keystone-v3-osprofiler.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "openstack": { - "auth_url": "http://example.net:5000/v3/", - "region_name": "RegionOne", - "endpoint_type": "public", - "admin": { - "username": "admin", - "password": "myadminpass", - "user_domain_name": "admin", - "project_name": "admin", - "project_domain_name": "admin" - }, - "https_insecure": false, - "https_cacert": "", - "profiler_hmac_key": "SECRET_KEY", - "profiler_conn_str": "mongodb://localhost" - } -} diff --git a/samples/deployments/existing-keystone-v3.json b/samples/deployments/existing-keystone-v3.json deleted file mode 100644 index 0b477c9b5d..0000000000 --- a/samples/deployments/existing-keystone-v3.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "openstack": { - "auth_url": "http://example.net:5000/v3/", - "region_name": "RegionOne", - "endpoint_type": "public", - "admin": { - "username": "admin", - "password": "myadminpass", - "user_domain_name": "admin", - "project_name": "admin", - "project_domain_name": "admin" - }, - "https_insecure": false, - "https_cacert": "" - } -} diff --git a/samples/deployments/existing-with-given-endpoint.json b/samples/deployments/existing-with-given-endpoint.json deleted file mode 100644 index 7b2943e727..0000000000 --- a/samples/deployments/existing-with-given-endpoint.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "openstack": { - "auth_url": "http://example.net:5000/v2.0/", - "region_name": "RegionOne", - "endpoint_type": "public", - "endpoint": "http://:// example: http://172.16.0.2:35357/v2.0/", - "admin": { - "username": "admin", - "password": "pa55word", - "tenant_name": "demo" - }, - "https_insecure": false, - "https_cacert": "" - } -} diff --git a/samples/deployments/existing-with-predefined-users.json b/samples/deployments/existing-with-predefined-users.json deleted file mode 100644 index d34d1952bf..0000000000 --- a/samples/deployments/existing-with-predefined-users.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "openstack": { - "auth_url": "http://example.net:5000/v2.0/", - "region_name": "RegionOne", - "endpoint_type": "public", - "admin": { - "username": "admin", - "password": "pa55word", - "tenant_name": "demo" - }, - "users": [ - { - "username": "not_an_admin1", - "password": "password", - "tenant_name": "some_tenant" - }, - { - "username": "not_an_admin2", - "password": "password2", - "tenant_name": "some_tenant2" - } - ] - } -} diff --git a/samples/deployments/existing.json b/samples/deployments/existing.json deleted file mode 100644 index 24951a75bf..0000000000 --- a/samples/deployments/existing.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "openstack": { - "auth_url": "http://example.net:5000/v2.0/", - "region_name": "RegionOne", - "endpoint_type": "public", - "admin": { - "username": "admin", - "password": "myadminpass", - "tenant_name": "demo" - }, - "https_insecure": false, - "https_cacert": "" - } -} diff --git a/samples/tasks/contexts/allow-ssh.json b/samples/tasks/contexts/allow-ssh.json deleted file mode 100644 index e5a24de204..0000000000 --- a/samples/tasks/contexts/allow-ssh.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": 
{ - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "allow_ssh": null - } - } - ] -} diff --git a/samples/tasks/contexts/allow-ssh.yaml b/samples/tasks/contexts/allow-ssh.yaml deleted file mode 100644 index 8b190cd845..0000000000 --- a/samples/tasks/contexts/allow-ssh.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - allow_ssh: null diff --git a/samples/tasks/contexts/api-versions.json b/samples/tasks/contexts/api-versions.json deleted file mode 100644 index 095681cac4..0000000000 --- a/samples/tasks/contexts/api-versions.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "api_versions": { - "nova": { - "version": 2.2 - }, - "cinder": { - "version": 2, - "service_type": "volumev2" - } - } - } - } - ] -} diff --git a/samples/tasks/contexts/api-versions.yaml b/samples/tasks/contexts/api-versions.yaml deleted file mode 100644 index b68c403a2d..0000000000 --- a/samples/tasks/contexts/api-versions.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - api_versions: - nova: - version: 2.2 - cinder: - version: 2 - service_type: "volumev2" \ No newline at end of file diff --git a/samples/tasks/contexts/audit-templates.json b/samples/tasks/contexts/audit-templates.json deleted file mode 100644 index b9e8b98064..0000000000 --- a/samples/tasks/contexts/audit-templates.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "audit_templates": { - "audit_templates_per_admin": 5, - "fill_strategy": "random", - "params": [ - { - "goal": { - "name": "workload_balancing" - }, - "strategy": { - "name": "workload_stabilization" - } - }, - { - "goal": { - "name": "dummy" - }, - "strategy": { - "name": "dummy" - } - } - ] - } - } - } - ] -} diff --git a/samples/tasks/contexts/audit-templates.yaml b/samples/tasks/contexts/audit-templates.yaml deleted file mode 100644 index 05c2e2a816..0000000000 --- a/samples/tasks/contexts/audit-templates.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - audit_templates: - audit_templates_per_admin: 5 - fill_strategy: "random" - params: - - goal: - name: "workload_balancing" - strategy: - name: "workload_stabilization" - - goal: - name: "dummy" - strategy: - name: "dummy" diff --git a/samples/tasks/contexts/ca-certs.json b/samples/tasks/contexts/ca-certs.json deleted file mode 100644 index cb76f81b9d..0000000000 --- a/samples/tasks/contexts/ca-certs.json +++ /dev/null @@ -1,66 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "context": { - "ca_certs": { - "directory": "/home/stack" - }, - "clusters": { - "node_count": 2 - }, - "cluster_templates": { - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "docker_volume_size": 5, - "coe": "kubernetes", - "image_id": "fedora-atomic-latest", - "network_driver": "flannel" - 
}, - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - } - }, - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "context": { - "ca_certs": { - "directory": "/home/stack" - }, - "clusters": { - "node_count": 2 - }, - "cluster_templates": { - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "docker_volume_size": 5, - "coe": "swarm", - "image_id": "fedora-atomic-latest", - "network_driver": "docker" - }, - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - } - } - ] -} diff --git a/samples/tasks/contexts/ca-certs.yaml b/samples/tasks/contexts/ca-certs.yaml deleted file mode 100644 index 7d61015965..0000000000 --- a/samples/tasks/contexts/ca-certs.yaml +++ /dev/null @@ -1,48 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - docker_volume_size: 5 - network_driver: "flannel" - coe: "kubernetes" - clusters: - node_count: 2 - ca_certs: - directory: "/home/stack" - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - docker_volume_size: 5 - network_driver: "docker" - coe: "swarm" - clusters: - node_count: 2 - ca_certs: - directory: "/home/stack" diff --git a/samples/tasks/contexts/ceilometer.json b/samples/tasks/contexts/ceilometer.json deleted file mode 100644 index c3991d739e..0000000000 --- a/samples/tasks/contexts/ceilometer.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "ceilometer": { - "counter_name": "cpu_util", - "counter_type": "gauge", - "counter_unit": "instance", - "counter_volume": 1.0, - "resources_per_tenant": 100, - "samples_per_resource": 100, - "timestamp_interval": 60, - "metadata_list": [ - {"status": "active", "name": "fake_resource", - "deleted": "False", - "created_at": "2015-09-04T12:34:19.000000"}, - {"status": "not_active", "name": "fake_resource_1", - "deleted": "False", - "created_at": "2015-09-10T06:55:12.000000"} - ], - "batch_size": 5 - } - } - } - ] -} diff --git a/samples/tasks/contexts/ceilometer.yaml b/samples/tasks/contexts/ceilometer.yaml deleted file mode 100644 index 22a6128cd4..0000000000 --- a/samples/tasks/contexts/ceilometer.yaml +++ /dev/null @@ -1,31 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - ceilometer: - counter_name: "cpu_util" - counter_type: "gauge" - counter_unit: "instance" - counter_volume: 1.0 - resources_per_tenant: 100 - samples_per_resource: 100 - timestamp_interval: 60 - metadata_list: - - status: "active" - name: "fake_resource" - deleted: "False" - created_at: "2015-09-04T12:34:19.000000" - - status: "not_active" - name: "fake_resource_1" - deleted: "False" - created_at: "2015-09-10T06:55:12.000000" - batch_size: 5 \ No newline at end of file diff --git a/samples/tasks/contexts/cluster-templates.json 
b/samples/tasks/contexts/cluster-templates.json deleted file mode 100644 index 4594749a9d..0000000000 --- a/samples/tasks/contexts/cluster-templates.json +++ /dev/null @@ -1,78 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "context": { - "cluster_templates": { - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "docker_volume_size": 5, - "coe": "kubernetes", - "image_id": "fedora-atomic-latest", - "network_driver": "flannel" - }, - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - } - }, - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "context": { - "cluster_templates": { - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "docker_volume_size": 5, - "coe": "swarm", - "image_id": "fedora-atomic-latest", - "network_driver": "docker" - }, - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - } - }, - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "context": { - "cluster_templates": { - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "coe": "mesos", - "image_id": "ubuntu-mesos", - "network_driver": "docker" - }, - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - } - } - ] -} diff --git a/samples/tasks/contexts/cluster-templates.yaml b/samples/tasks/contexts/cluster-templates.yaml deleted file mode 100644 index 16ccc4cf0c..0000000000 --- a/samples/tasks/contexts/cluster-templates.yaml +++ /dev/null @@ -1,58 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - docker_volume_size: 5 - network_driver: "flannel" - coe: "kubernetes" - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - docker_volume_size: 5 - network_driver: "docker" - coe: "swarm" - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "ubuntu-mesos" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - network_driver: "docker" - coe: "mesos" diff --git a/samples/tasks/contexts/clusters.json b/samples/tasks/contexts/clusters.json deleted file mode 100644 index b9c271e565..0000000000 --- a/samples/tasks/contexts/clusters.json +++ /dev/null @@ -1,87 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "context": { - "clusters": { - "node_count": 2 - }, - "cluster_templates": { - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "docker_volume_size": 5, - "coe": "kubernetes", - "image_id": "fedora-atomic-latest", - "network_driver": "flannel" - }, - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - } - }, - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "concurrency": 1, - "times": 
1 - }, - "context": { - "clusters": { - "node_count": 2 - }, - "cluster_templates": { - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "docker_volume_size": 5, - "coe": "swarm", - "image_id": "fedora-atomic-latest", - "network_driver": "docker" - }, - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - } - }, - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "context": { - "clusters": { - "node_count": 2 - }, - "cluster_templates": { - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "coe": "mesos", - "image_id": "ubuntu-mesos", - "network_driver": "docker" - }, - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - } - } - ] -} diff --git a/samples/tasks/contexts/clusters.yaml b/samples/tasks/contexts/clusters.yaml deleted file mode 100644 index 07bac0ca37..0000000000 --- a/samples/tasks/contexts/clusters.yaml +++ /dev/null @@ -1,64 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - docker_volume_size: 5 - network_driver: "flannel" - coe: "kubernetes" - clusters: - node_count: 2 - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - docker_volume_size: 5 - network_driver: "docker" - coe: "swarm" - clusters: - node_count: 2 - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "ubuntu-mesos" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - network_driver: "docker" - coe: "mesos" - clusters: - node_count: 2 diff --git a/samples/tasks/contexts/dummy-context.json b/samples/tasks/contexts/dummy-context.json index 4ebdc3f310..b7bca1c8b3 100644 --- a/samples/tasks/contexts/dummy-context.json +++ b/samples/tasks/contexts/dummy-context.json @@ -1,5 +1,5 @@ { - "Dummy.openstack": [ + "Dummy.dummy": [ { "args": { "sleep": 0.1 @@ -10,10 +10,6 @@ "concurrency": 2 }, "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, "dummy_context": { "fail_setup": false, "fail_cleanup": false diff --git a/samples/tasks/contexts/dummy-context.yaml b/samples/tasks/contexts/dummy-context.yaml index 7ab64cc198..247e340afc 100644 --- a/samples/tasks/contexts/dummy-context.yaml +++ b/samples/tasks/contexts/dummy-context.yaml @@ -1,5 +1,5 @@ --- - Dummy.openstack: + Dummy.dummy: - args: sleep: 0.1 @@ -8,9 +8,6 @@ times: 4 concurrency: 2 context: - users: - tenants: 1 - users_per_tenant: 2 dummy_context: fail_setup: false fail_cleanup: false \ No newline at end of file diff --git a/samples/tasks/contexts/ec2-servers.json b/samples/tasks/contexts/ec2-servers.json deleted file mode 100644 index c55a4a1d90..0000000000 --- a/samples/tasks/contexts/ec2-servers.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "ec2_servers": { - 
"flavor": { - "name": "m1.tiny" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "servers_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/contexts/ec2-servers.yaml b/samples/tasks/contexts/ec2-servers.yaml deleted file mode 100644 index 376b944227..0000000000 --- a/samples/tasks/contexts/ec2-servers.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - ec2_servers: - flavor: - name: "m1.tiny" - image: - name: "^cirros.*-disk$" - servers_per_tenant: 2 diff --git a/samples/tasks/contexts/existing-network.json b/samples/tasks/contexts/existing-network.json deleted file mode 100644 index abbc70a18d..0000000000 --- a/samples/tasks/contexts/existing-network.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "existing_network": {} - } - } - ] -} diff --git a/samples/tasks/contexts/existing-network.yaml b/samples/tasks/contexts/existing-network.yaml deleted file mode 100644 index 1383106642..0000000000 --- a/samples/tasks/contexts/existing-network.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - existing_network: {} \ No newline at end of file diff --git a/samples/tasks/contexts/flavors.json b/samples/tasks/contexts/flavors.json deleted file mode 100644 index c398a29c4a..0000000000 --- a/samples/tasks/contexts/flavors.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "flavors": [ - { - "name": "ram64", - "ram": 64 - } - ] - } - } - ] -} diff --git a/samples/tasks/contexts/flavors.yaml b/samples/tasks/contexts/flavors.yaml deleted file mode 100644 index dc68043584..0000000000 --- a/samples/tasks/contexts/flavors.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - flavors: - - - name: "ram64" - ram: 64 \ No newline at end of file diff --git a/samples/tasks/contexts/heat-dataplane.json b/samples/tasks/contexts/heat-dataplane.json deleted file mode 100644 index 38a0bd6b86..0000000000 --- a/samples/tasks/contexts/heat-dataplane.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "network": { - "start_cidr": "10.2.0.0/24", - "networks_per_tenant": 1, - "subnets_per_network": 1, - "network_create_args": {}, - "dns_nameservers": ["10.2.0.1"] - }, - "heat_dataplane": { - "stacks_per_tenant": 1, - "template": "samples/tasks/scenarios/heat/templates/resource-group-with-constraint.yaml.template", - "files": { - "file1": "f1.yaml", - "file2": "f2.yaml" - }, - "parameters": { - "count": 40, - "delay": 0.1 - }, - "context_parameters": {} - } - } - } - ] -} diff --git a/samples/tasks/contexts/heat-dataplane.yaml b/samples/tasks/contexts/heat-dataplane.yaml deleted file mode 100644 index 
9e67069028..0000000000 --- a/samples/tasks/contexts/heat-dataplane.yaml +++ /dev/null @@ -1,30 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - network: - start_cidr: "10.2.0.0/24" - networks_per_tenant: 1 - subnets_per_network: 1 - network_create_args: {} - dns_nameservers: - - "10.2.0.1" - heat_dataplane: - stacks_per_tenant: 1 - template: "samples/tasks/scenarios/heat/templates/resource-group-with-constraint.yaml.template" - files: - file1: "f1.yaml" - file2: "f2.yaml" - parameters: - count: 40 - delay: 0.1 - context_parameters: {} \ No newline at end of file diff --git a/samples/tasks/contexts/image-command-customizer.json b/samples/tasks/contexts/image-command-customizer.json deleted file mode 100644 index 7748139e6d..0000000000 --- a/samples/tasks/contexts/image-command-customizer.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 1, - "concurrency": 1, - "timeout": 3000 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "image_command_customizer": { - "image": {"name": "Fedora-x86_64-20-20140618-sda"}, - "flavor": {"name": "m1.small"}, - "command": { - "local_path": "rally-jobs/extra/install_benchmark.sh", - "remote_path": "./install_benchmark.sh" - }, - "username": "root", - "userdata": "#cloud-config\ndisable_root: 0\nssh_pwauth: 1" - }, - "network": {} - } - } - ] -} diff --git a/samples/tasks/contexts/image-command-customizer.yaml b/samples/tasks/contexts/image-command-customizer.yaml deleted file mode 100644 index 832e037d25..0000000000 --- a/samples/tasks/contexts/image-command-customizer.yaml +++ /dev/null @@ -1,25 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - concurrency: 1 - timeout: 3000 - times: 1 - type: "constant" - context: - image_command_customizer: - command: - local_path: "rally-jobs/extra/install_benchmark.sh" - remote_path: "./install_benchmark.sh" - flavor: - name: m1.small - image: - name: "Fedora-x86_64-20-20140618-sda" - userdata: "#cloud-config\ndisable_root: 0\nssh_pwauth: 1" - username: root - network: {} - users: - tenants: 1 - users_per_tenant: 1 diff --git a/samples/tasks/contexts/images.json b/samples/tasks/contexts/images.json deleted file mode 100644 index 7a2ec6009f..0000000000 --- a/samples/tasks/contexts/images.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 2, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "images": { - "image_url": "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img", - "image_type": "qcow2", - "image_container": "bare", - "images_per_tenant": 4 - } - } - } - ] -} diff --git a/samples/tasks/contexts/images.yaml b/samples/tasks/contexts/images.yaml deleted file mode 100644 index 3bb9cd3727..0000000000 --- a/samples/tasks/contexts/images.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 2 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 2 - images: - image_url: "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img" - image_type: "qcow2" - image_container: "bare" - images_per_tenant: 4 diff --git a/samples/tasks/contexts/keypair.json b/samples/tasks/contexts/keypair.json deleted file mode 100644 
index 66d0f145ac..0000000000 --- a/samples/tasks/contexts/keypair.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "keypair": {} - } - } - ] -} diff --git a/samples/tasks/contexts/keypair.yaml b/samples/tasks/contexts/keypair.yaml deleted file mode 100644 index 3e5cd148a4..0000000000 --- a/samples/tasks/contexts/keypair.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - keypair: {} diff --git a/samples/tasks/contexts/lbaas.json b/samples/tasks/contexts/lbaas.json deleted file mode 100644 index 442227fffb..0000000000 --- a/samples/tasks/contexts/lbaas.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "lbaas": { - "pool": {} - } - } - }, - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "lbaas": { - "pool": { - "lb_method": "ROUND_ROBIN", - "protocol": "HTTP" - } - } - } - } - ] -} diff --git a/samples/tasks/contexts/lbaas.yaml b/samples/tasks/contexts/lbaas.yaml deleted file mode 100644 index 706fee7611..0000000000 --- a/samples/tasks/contexts/lbaas.yaml +++ /dev/null @@ -1,30 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - lbaas: - pool: {} - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - lbaas: - pool: - lb_method: "ROUND_ROBIN" - protocol: "HTTP" diff --git a/samples/tasks/contexts/manila-security-services.json b/samples/tasks/contexts/manila-security-services.json deleted file mode 100644 index 1f59fd21b3..0000000000 --- a/samples/tasks/contexts/manila-security-services.json +++ /dev/null @@ -1,60 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "quotas": { - "manila": { - "shares": -1, - "gigabytes": -1, - "share_networks": -1 - } - }, - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "manila_share_networks": { - "use_share_networks": true - }, - "manila_security_services": { - "security_services": [ - { - "type": "ldap", - "server": "LDAP server address", - "user": "User that will be used", - "password": "Password for specified user" - }, - { - "type": "kerberos", - "dns_ip": "IP address of DNS service to be used", - "server": "Kerberos server address", - "domain": "Kerberos realm", - "user": "User that will be used", - "password": "Password for specified user" - }, - { - "type": "active_directory", - "dns_ip": "IP address of DNS service to be used", - "domain": "Domain from 'Active Directory'", - "user": "User from 'Active Directory'", - "password": "password for specified user" - } - ] - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/contexts/manila-security-services.yaml b/samples/tasks/contexts/manila-security-services.yaml deleted file 
mode 100644 index 8c052563e7..0000000000 --- a/samples/tasks/contexts/manila-security-services.yaml +++ /dev/null @@ -1,42 +0,0 @@ -Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - quotas: - manila: - shares: -1 - gigabytes: -1 - share_networks: -1 - users: - tenants: 1 - users_per_tenant: 1 - manila_share_networks: - use_share_networks: True - manila_security_services: - security_services: - - - type: "ldap" - server: "LDAP server address" - user: "User that will be used" - password: "Password for specified user" - - - type: "kerberos" - dns_ip: "IP address of DNS service to be used" - server: "Kerberos server address" - domain: "Kerberos realm" - user: "User that will be used" - password: "Password for specified user" - - - type: "active_directory" - dns_ip: "IP address of DNS service to be used" - domain: "Domain from 'Active Directory'" - user: "User from 'Active Directory'" - password: "password for specified user" - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/contexts/manila-share-networks.json b/samples/tasks/contexts/manila-share-networks.json deleted file mode 100644 index 7af9620a51..0000000000 --- a/samples/tasks/contexts/manila-share-networks.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "quotas": { - "manila": { - "shares": -1, - "gigabytes": -1, - "share_networks": -1 - } - }, - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "manila_share_networks": { - "use_share_networks": true - }, - "manila_shares": { - "shares_per_tenant": 1, - "share_proto": "NFS", - "size": 1, - "share_type": "dhss_true" - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/contexts/manila-share-networks.yaml b/samples/tasks/contexts/manila-share-networks.yaml deleted file mode 100644 index d18420b40f..0000000000 --- a/samples/tasks/contexts/manila-share-networks.yaml +++ /dev/null @@ -1,27 +0,0 @@ -Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - quotas: - manila: - shares: -1 - gigabytes: -1 - share_networks: -1 - users: - tenants: 1 - users_per_tenant: 1 - manila_share_networks: - use_share_networks: True - manila_shares: - shares_per_tenant: 1 - share_proto: "NFS" - size: 1 - share_type: "dhss_true" - sla: - failure_rate: - max: 0 \ No newline at end of file diff --git a/samples/tasks/contexts/manila-shares.json b/samples/tasks/contexts/manila-shares.json deleted file mode 100644 index 66104c7210..0000000000 --- a/samples/tasks/contexts/manila-shares.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "quotas": { - "manila": { - "shares": -1, - "gigabytes": -1, - "share_networks": -1 - } - }, - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "manila_share_networks": { - "use_share_networks": true - }, - "manila_shares": { - "shares_per_tenant": 1, - "share_proto": "NFS", - "size": 1, - "share_type": "dhss_true" - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/contexts/manila-shares.yaml b/samples/tasks/contexts/manila-shares.yaml deleted file mode 100644 index d18420b40f..0000000000 --- a/samples/tasks/contexts/manila-shares.yaml +++ /dev/null @@ -1,27 +0,0 @@ -Dummy.openstack: 
- - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - quotas: - manila: - shares: -1 - gigabytes: -1 - share_networks: -1 - users: - tenants: 1 - users_per_tenant: 1 - manila_share_networks: - use_share_networks: True - manila_shares: - shares_per_tenant: 1 - share_proto: "NFS" - size: 1 - share_type: "dhss_true" - sla: - failure_rate: - max: 0 \ No newline at end of file diff --git a/samples/tasks/contexts/monasca-metrics.json b/samples/tasks/contexts/monasca-metrics.json deleted file mode 100644 index a74889e389..0000000000 --- a/samples/tasks/contexts/monasca-metrics.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "roles": [ - "monasca-user" - ], - "monasca_metrics": { - "dimensions": { - "region": "RegionOne", - "service": "identity", - "hostname": "fake_host", - "url": "http://fake_host:5000/v2.0" - }, - "metrics_per_tenant": 10 - } - } - } - ] -} diff --git a/samples/tasks/contexts/monasca-metrics.yaml b/samples/tasks/contexts/monasca-metrics.yaml deleted file mode 100644 index 7146c1216f..0000000000 --- a/samples/tasks/contexts/monasca-metrics.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - roles: - - "monasca-user" - monasca_metrics: - dimensions: - region: "RegionOne" - service: "identity" - hostname: "fake_host" - url: "http://fake_host:5000/v2.0" - metrics_per_tenant: 10 diff --git a/samples/tasks/contexts/murano-environments.json b/samples/tasks/contexts/murano-environments.json deleted file mode 100644 index 286b33e87c..0000000000 --- a/samples/tasks/contexts/murano-environments.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "murano_environments": { - "environments_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/contexts/murano-environments.yaml b/samples/tasks/contexts/murano-environments.yaml deleted file mode 100644 index cc36e35cd6..0000000000 --- a/samples/tasks/contexts/murano-environments.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - murano_environments: - environments_per_tenant: 2 diff --git a/samples/tasks/contexts/murano-packages.json b/samples/tasks/contexts/murano-packages.json deleted file mode 100644 index 31f02a81bd..0000000000 --- a/samples/tasks/contexts/murano-packages.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 2, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "murano_packages": { - "app_package": "rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter.zip" - }, - "roles": ["admin"] - } - }, - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 2, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "murano_packages": { - "app_package": 
"rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/" - }, - "roles": ["admin"] - } - } - ] -} diff --git a/samples/tasks/contexts/murano-packages.yaml b/samples/tasks/contexts/murano-packages.yaml deleted file mode 100644 index 87e5709e9c..0000000000 --- a/samples/tasks/contexts/murano-packages.yaml +++ /dev/null @@ -1,32 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - murano_packages: - app_package: "rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter.zip" - roles: - - "admin" - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - murano_packages: - app_package: "rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/" - roles: - - "admin" diff --git a/samples/tasks/contexts/network.json b/samples/tasks/contexts/network.json deleted file mode 100644 index 5958b59fdc..0000000000 --- a/samples/tasks/contexts/network.json +++ /dev/null @@ -1,47 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "network": {} - } - }, - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "network": { - "start_cidr": "10.2.0.0/24", - "networks_per_tenant": 1, - "subnets_per_network": 1, - "network_create_args": {}, - "dns_nameservers": ["10.2.0.1"], - "router": { - "external": false - } - } - } - } - ] -} diff --git a/samples/tasks/contexts/network.yaml b/samples/tasks/contexts/network.yaml deleted file mode 100644 index 53c9de6b54..0000000000 --- a/samples/tasks/contexts/network.yaml +++ /dev/null @@ -1,34 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - network: {} - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - network: - start_cidr: "10.2.0.0/24" - networks_per_tenant: 1 - subnets_per_network: 1 - network_create_args: {} - dns_nameservers: - - "10.2.0.1" - router: - external: false \ No newline at end of file diff --git a/samples/tasks/contexts/profiles.json b/samples/tasks/contexts/profiles.json deleted file mode 100644 index 1db069afe0..0000000000 --- a/samples/tasks/contexts/profiles.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 3, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "profiles": { - "type": "os.nova.server", - "version": "1.0", - "properties": { - "name": "cirros_server", - "flavor": 1, - "image": "cirros-0.3.5-x86_64-disk", - "networks": [ - { "network": "private" } - ] - } - } - } - } - ] -} diff --git a/samples/tasks/contexts/profiles.yaml b/samples/tasks/contexts/profiles.yaml deleted file mode 100644 index 48e951a012..0000000000 --- a/samples/tasks/contexts/profiles.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 3 - concurrency: 1 - context: - users: - tenants: 1 - 
users_per_tenant: 1 - profiles: - type: "os.nova.server" - version: "1.0" - properties: - name: "cirros_server" - flavor: 1 - image: "cirros-0.3.5-x86_64-disk" - networks: - - network: "private" diff --git a/samples/tasks/contexts/quotas.json b/samples/tasks/contexts/quotas.json deleted file mode 100644 index f1aee45427..0000000000 --- a/samples/tasks/contexts/quotas.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "quotas": { - "manila": { - "share_networks": -1 - } - } - } - }, - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "quotas": { - "cinder": { - "volumes": -1 - } - } - } - } - ] -} diff --git a/samples/tasks/contexts/quotas.yaml b/samples/tasks/contexts/quotas.yaml deleted file mode 100644 index 85afcc219e..0000000000 --- a/samples/tasks/contexts/quotas.yaml +++ /dev/null @@ -1,30 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - quotas: - manila: - share_networks: -1 - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - quotas: - cinder: - volumes: -1 diff --git a/samples/tasks/contexts/roles.json b/samples/tasks/contexts/roles.json deleted file mode 100644 index 0d09cd4186..0000000000 --- a/samples/tasks/contexts/roles.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "roles": ["role"] - } - } - ] -} diff --git a/samples/tasks/contexts/roles.yaml b/samples/tasks/contexts/roles.yaml deleted file mode 100644 index 32c50488fe..0000000000 --- a/samples/tasks/contexts/roles.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - roles: - - "role" diff --git a/samples/tasks/contexts/router.json b/samples/tasks/contexts/router.json deleted file mode 100644 index b9b83e8926..0000000000 --- a/samples/tasks/contexts/router.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "router": {} - } - }, - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "router": { - "routers_per_tenant": 1 - } - } - } - ] -} diff --git a/samples/tasks/contexts/router.yaml b/samples/tasks/contexts/router.yaml deleted file mode 100644 index 436cfa83e0..0000000000 --- a/samples/tasks/contexts/router.yaml +++ /dev/null @@ -1,27 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - router: {} - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - 
users_per_tenant: 2 - router: - routers_per_tenant: 1 diff --git a/samples/tasks/contexts/sahara-cluster.json b/samples/tasks/contexts/sahara-cluster.json deleted file mode 100644 index 5a40ed5ffc..0000000000 --- a/samples/tasks/contexts/sahara-cluster.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "sahara_cluster": { - "master_flavor_id": "4", - "worker_flavor_id": "3", - "workers_count": 3, - "plugin_name": "vanilla", - "hadoop_version": "2.6.0", - "auto_security_group": true - }, - "network": {} - } - } - ] -} \ No newline at end of file diff --git a/samples/tasks/contexts/sahara-cluster.yaml b/samples/tasks/contexts/sahara-cluster.yaml deleted file mode 100644 index 68b915d67d..0000000000 --- a/samples/tasks/contexts/sahara-cluster.yaml +++ /dev/null @@ -1,21 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sahara_cluster: - master_flavor_id: "4" - worker_flavor_id: "3" - workers_count: 3 - plugin_name: "vanilla" - hadoop_version: "2.6.0" - auto_security_group: True - network: {} diff --git a/samples/tasks/contexts/sahara-image.json b/samples/tasks/contexts/sahara-image.json deleted file mode 100644 index da85b6b016..0000000000 --- a/samples/tasks/contexts/sahara-image.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "sahara_image": { - "image_url": "http://sahara-files.mirantis.com/sahara-icehouse-vanilla-2.3.0-ubuntu-13.10.qcow2", - "username": "ubuntu", - "plugin_name": "vanilla", - "hadoop_version": "2.3.0" - }, - "network": {} - } - } - ] -} \ No newline at end of file diff --git a/samples/tasks/contexts/sahara-image.yaml b/samples/tasks/contexts/sahara-image.yaml deleted file mode 100644 index 97b01eac2c..0000000000 --- a/samples/tasks/contexts/sahara-image.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sahara_image: - image_url: "http://sahara-files.mirantis.com/sahara-icehouse-vanilla-2.3.0-ubuntu-13.10.qcow2" - username: "ubuntu" - plugin_name: "vanilla" - hadoop_version: "2.3.0" - network: {} diff --git a/samples/tasks/contexts/sahara-input-data-sources.json b/samples/tasks/contexts/sahara-input-data-sources.json deleted file mode 100644 index 3bdadce43f..0000000000 --- a/samples/tasks/contexts/sahara-input-data-sources.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "sahara_input_data_sources": { - "input_type": "hdfs", - "input_url": "/" - }, - "sahara_output_data_sources": { - "output_type": "hdfs", - "output_url_prefix": "/out_" - }, - "network": {} - } - } - ] -} \ No newline at end of file diff --git a/samples/tasks/contexts/sahara-input-data-sources.yaml b/samples/tasks/contexts/sahara-input-data-sources.yaml deleted file mode 100644 index ed96d8110a..0000000000 --- 
a/samples/tasks/contexts/sahara-input-data-sources.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sahara_input_data_sources: - input_type: "hdfs" - input_url: "/" - sahara_output_data_sources: - output_type: "hdfs" - output_url_prefix: "/out_" - network: {} diff --git a/samples/tasks/contexts/sahara-job-binaries.json b/samples/tasks/contexts/sahara-job-binaries.json deleted file mode 100644 index 2e1753892b..0000000000 --- a/samples/tasks/contexts/sahara-job-binaries.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "sahara_job_binaries": { - "libs": [{ - "name": "tests.jar", - "download_url": "http://repo1.maven.org/maven2/org/apache/hadoop/hadoop-hdfs/2.6.0/hadoop-hdfs-2.6.0-tests.jar" - }] - }, - "network": {} - } - }, - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "sahara_job_binaries": { - "mains": [{ - "name": "example.pig", - "download_url": "https://raw.githubusercontent.com/openstack/sahara/master/etc/edp-examples/pig-job/example.pig" - }], - "libs": [{ - "name": "udf.jar", - "download_url": "https://github.com/openstack/sahara/blob/master/etc/edp-examples/pig-job/udf.jar?raw=true" - }] - }, - "network": {} - } - } - ] -} diff --git a/samples/tasks/contexts/sahara-job-binaries.yaml b/samples/tasks/contexts/sahara-job-binaries.yaml deleted file mode 100644 index 47cdf97b5d..0000000000 --- a/samples/tasks/contexts/sahara-job-binaries.yaml +++ /dev/null @@ -1,40 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sahara_job_binaries: - libs: - - - name: "tests.jar" - download_url: "http://repo1.maven.org/maven2/org/apache/hadoop/hadoop-hdfs/2.6.0/hadoop-hdfs-2.6.0-tests.jar" - network: {} - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sahara_job_binaries: - mains: - - - name: "example.pig" - download_url: "https://raw.githubusercontent.com/openstack/sahara/master/etc/edp-examples/pig-job/example.pig" - libs: - - - name: "udf.jar" - download_url: "https://github.com/openstack/sahara/blob/master/etc/edp-examples/pig-job/udf.jar?raw=true" - network: {} diff --git a/samples/tasks/contexts/sahara-output-data-sources.json b/samples/tasks/contexts/sahara-output-data-sources.json deleted file mode 100644 index 3bdadce43f..0000000000 --- a/samples/tasks/contexts/sahara-output-data-sources.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "sahara_input_data_sources": { - "input_type": "hdfs", - "input_url": "/" - }, - "sahara_output_data_sources": { - "output_type": "hdfs", - "output_url_prefix": "/out_" - }, - "network": {} - } - } - ] -} \ No newline at end of file diff --git a/samples/tasks/contexts/sahara-output-data-sources.yaml b/samples/tasks/contexts/sahara-output-data-sources.yaml deleted file mode 100644 
index ed96d8110a..0000000000 --- a/samples/tasks/contexts/sahara-output-data-sources.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sahara_input_data_sources: - input_type: "hdfs" - input_url: "/" - sahara_output_data_sources: - output_type: "hdfs" - output_url_prefix: "/out_" - network: {} diff --git a/samples/tasks/contexts/servers.json b/samples/tasks/contexts/servers.json deleted file mode 100644 index 262e524c50..0000000000 --- a/samples/tasks/contexts/servers.json +++ /dev/null @@ -1,30 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 1, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "servers": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "servers_per_tenant": 2 - } - } - } - ] -} diff --git a/samples/tasks/contexts/servers.yaml b/samples/tasks/contexts/servers.yaml deleted file mode 100644 index 0365cf16c6..0000000000 --- a/samples/tasks/contexts/servers.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 2 - servers: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - servers_per_tenant: 2 \ No newline at end of file diff --git a/samples/tasks/contexts/stacks.json b/samples/tasks/contexts/stacks.json deleted file mode 100644 index 0ddc8d2f57..0000000000 --- a/samples/tasks/contexts/stacks.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "stacks": { - "stacks_per_tenant": 2, - "resources_per_stack": 10 - } - } - } - ] -} diff --git a/samples/tasks/contexts/stacks.yaml b/samples/tasks/contexts/stacks.yaml deleted file mode 100644 index 0080eb9253..0000000000 --- a/samples/tasks/contexts/stacks.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - stacks: - stacks_per_tenant: 2 - resources_per_stack: 10 \ No newline at end of file diff --git a/samples/tasks/contexts/swift-objects.json b/samples/tasks/contexts/swift-objects.json deleted file mode 100644 index 546f5a44b2..0000000000 --- a/samples/tasks/contexts/swift-objects.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 6, - "concurrency": 3 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "roles": [ - "admin" - ], - "swift_objects": { - "containers_per_tenant": 1, - "objects_per_container": 10, - "object_size": 1024 - } - } - } - ] -} diff --git a/samples/tasks/contexts/swift-objects.yaml b/samples/tasks/contexts/swift-objects.yaml deleted file mode 100644 index 23fe638020..0000000000 --- a/samples/tasks/contexts/swift-objects.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 6 - concurrency: 3 - context: - users: - 
tenants: 1 - users_per_tenant: 1 - roles: - - "admin" - swift_objects: - containers_per_tenant: 1 - objects_per_container: 10 - object_size: 1024 diff --git a/samples/tasks/contexts/users.json b/samples/tasks/contexts/users.json deleted file mode 100644 index 108f3d44eb..0000000000 --- a/samples/tasks/contexts/users.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - } - } - }, - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2, - "resource_management_workers": 1, - "project_domain": "project", - "user_domain": "demo", - "user_choice_method": "random" - } - } - } - ] -} diff --git a/samples/tasks/contexts/users.yaml b/samples/tasks/contexts/users.yaml deleted file mode 100644 index f2ccacac5f..0000000000 --- a/samples/tasks/contexts/users.yaml +++ /dev/null @@ -1,28 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - resource_management_workers: 1 - project_domain: "project" - user_domain: "demo" - user_choice_method: "random" diff --git a/samples/tasks/contexts/volume-types.json b/samples/tasks/contexts/volume-types.json deleted file mode 100644 index 73a962b26e..0000000000 --- a/samples/tasks/contexts/volume-types.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "volume_types": ["test"] - } - } - ] -} \ No newline at end of file diff --git a/samples/tasks/contexts/volume-types.yaml b/samples/tasks/contexts/volume-types.yaml deleted file mode 100644 index a0e82cfcad..0000000000 --- a/samples/tasks/contexts/volume-types.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - volume_types: - - test \ No newline at end of file diff --git a/samples/tasks/contexts/volumes.json b/samples/tasks/contexts/volumes.json deleted file mode 100644 index f5594650da..0000000000 --- a/samples/tasks/contexts/volumes.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "volumes": { - "size": 1, - "volumes_per_tenant": 4 - } - } - } - ] -} diff --git a/samples/tasks/contexts/volumes.yaml b/samples/tasks/contexts/volumes.yaml deleted file mode 100644 index 3980f2115f..0000000000 --- a/samples/tasks/contexts/volumes.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - volumes: - size: 1 - volumes_per_tenant: 4 diff --git a/samples/tasks/contexts/zones.json b/samples/tasks/contexts/zones.json deleted file mode 100644 index 7bd3233953..0000000000 --- 
a/samples/tasks/contexts/zones.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "Dummy.openstack": [ - { - "args": { - "sleep": 0.1 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 2 - }, - "zones": { - "zones_per_tenant": 1 - } - } - } - ] -} diff --git a/samples/tasks/contexts/zones.yaml b/samples/tasks/contexts/zones.yaml deleted file mode 100644 index 7ae5f5b4ed..0000000000 --- a/samples/tasks/contexts/zones.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- - Dummy.openstack: - - - args: - sleep: 0.1 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 2 - zones: - zones_per_tenant: 1 diff --git a/samples/tasks/scenarios/authenticate/keystone.json b/samples/tasks/scenarios/authenticate/keystone.json deleted file mode 100644 index 38a841306c..0000000000 --- a/samples/tasks/scenarios/authenticate/keystone.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "Authenticate.keystone": [ - { - "runner": { - "type": "constant", - "times": 100, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 50 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/authenticate/keystone.yaml b/samples/tasks/scenarios/authenticate/keystone.yaml deleted file mode 100644 index 23d613ecdc..0000000000 --- a/samples/tasks/scenarios/authenticate/keystone.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - Authenticate.keystone: - - - runner: - type: "constant" - times: 100 - concurrency: 5 - context: - users: - tenants: 3 - users_per_tenant: 50 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/authenticate/token-validate-ceilometer.json b/samples/tasks/scenarios/authenticate/token-validate-ceilometer.json deleted file mode 100644 index c0e4fa248d..0000000000 --- a/samples/tasks/scenarios/authenticate/token-validate-ceilometer.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "Authenticate.validate_ceilometer": [ - { - "args": { - "repetitions": 2 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 5 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/authenticate/token-validate-ceilometer.yaml b/samples/tasks/scenarios/authenticate/token-validate-ceilometer.yaml deleted file mode 100644 index 447eebe79f..0000000000 --- a/samples/tasks/scenarios/authenticate/token-validate-ceilometer.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - Authenticate.validate_ceilometer: - - - args: - repetitions: 2 - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 3 - users_per_tenant: 5 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/authenticate/token-validate-cinder.json b/samples/tasks/scenarios/authenticate/token-validate-cinder.json deleted file mode 100644 index 1d35a29a87..0000000000 --- a/samples/tasks/scenarios/authenticate/token-validate-cinder.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "Authenticate.validate_cinder": [ - { - "args": { - "repetitions": 2 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 5 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/authenticate/token-validate-cinder.yaml b/samples/tasks/scenarios/authenticate/token-validate-cinder.yaml deleted 
file mode 100644 index 8749112dcc..0000000000 --- a/samples/tasks/scenarios/authenticate/token-validate-cinder.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - Authenticate.validate_cinder: - - - args: - repetitions: 2 - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 3 - users_per_tenant: 5 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/authenticate/token-validate-glance.json b/samples/tasks/scenarios/authenticate/token-validate-glance.json deleted file mode 100644 index 080cac16a3..0000000000 --- a/samples/tasks/scenarios/authenticate/token-validate-glance.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "Authenticate.validate_glance": [ - { - "args": { - "repetitions": 2 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 5 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/authenticate/token-validate-glance.yaml b/samples/tasks/scenarios/authenticate/token-validate-glance.yaml deleted file mode 100644 index 1404427f4e..0000000000 --- a/samples/tasks/scenarios/authenticate/token-validate-glance.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - Authenticate.validate_glance: - - - args: - repetitions: 2 - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 3 - users_per_tenant: 5 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/authenticate/token-validate-heat.json b/samples/tasks/scenarios/authenticate/token-validate-heat.json deleted file mode 100644 index 1e3db626d1..0000000000 --- a/samples/tasks/scenarios/authenticate/token-validate-heat.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "Authenticate.validate_heat": [ - { - "args": { - "repetitions": 2 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 5 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/authenticate/token-validate-heat.yaml b/samples/tasks/scenarios/authenticate/token-validate-heat.yaml deleted file mode 100644 index 78b78fc0eb..0000000000 --- a/samples/tasks/scenarios/authenticate/token-validate-heat.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - Authenticate.validate_heat: - - - args: - repetitions: 2 - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 3 - users_per_tenant: 5 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/authenticate/token-validate-monasca.json b/samples/tasks/scenarios/authenticate/token-validate-monasca.json deleted file mode 100644 index c8bcc39e23..0000000000 --- a/samples/tasks/scenarios/authenticate/token-validate-monasca.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "Authenticate.validate_monasca": [ - { - "args": { - "repetitions": 2 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 5 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/authenticate/token-validate-monasca.yaml b/samples/tasks/scenarios/authenticate/token-validate-monasca.yaml deleted file mode 100644 index c1d7ff8611..0000000000 --- a/samples/tasks/scenarios/authenticate/token-validate-monasca.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - Authenticate.validate_monasca: - - - args: - repetitions: 2 - runner: - type: "constant" - times: 10 - 
concurrency: 5 - context: - users: - tenants: 3 - users_per_tenant: 5 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/authenticate/token-validate-neutron.json b/samples/tasks/scenarios/authenticate/token-validate-neutron.json deleted file mode 100644 index bde1b739f4..0000000000 --- a/samples/tasks/scenarios/authenticate/token-validate-neutron.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "Authenticate.validate_neutron": [ - { - "args": { - "repetitions": 2 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 5 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/authenticate/token-validate-neutron.yaml b/samples/tasks/scenarios/authenticate/token-validate-neutron.yaml deleted file mode 100644 index d418030019..0000000000 --- a/samples/tasks/scenarios/authenticate/token-validate-neutron.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - Authenticate.validate_neutron: - - - args: - repetitions: 2 - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 3 - users_per_tenant: 5 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/authenticate/token-validate-nova.json b/samples/tasks/scenarios/authenticate/token-validate-nova.json deleted file mode 100644 index 54345fad0c..0000000000 --- a/samples/tasks/scenarios/authenticate/token-validate-nova.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "Authenticate.validate_nova": [ - { - "args": { - "repetitions": 2 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 5 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/authenticate/token-validate-nova.yaml b/samples/tasks/scenarios/authenticate/token-validate-nova.yaml deleted file mode 100644 index 2bb423992f..0000000000 --- a/samples/tasks/scenarios/authenticate/token-validate-nova.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - Authenticate.validate_nova: - - - args: - repetitions: 2 - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 3 - users_per_tenant: 5 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/ceilometer/all-list-meters.json b/samples/tasks/scenarios/ceilometer/all-list-meters.json deleted file mode 100644 index a7c4affcf5..0000000000 --- a/samples/tasks/scenarios/ceilometer/all-list-meters.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "CeilometerMeters.list_meters": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "ceilometer": { - "counter_name": "rally_meter", - "counter_type": "gauge", - "counter_unit": "%", - "counter_volume": 100, - "resources_per_tenant": 100, - "samples_per_resource": 100, - "timestamp_interval": 10, - "metadata_list": [ - {"status": "active", "name": "rally on", - "deleted": "false"}, - {"status": "terminated", "name": "rally off", - "deleted": "true"} - ] - } - }, - "args": { - "limit": 50, - "metadata_query": {"status": "terminated"} - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} - - diff --git a/samples/tasks/scenarios/ceilometer/all-list-meters.yaml b/samples/tasks/scenarios/ceilometer/all-list-meters.yaml deleted file mode 100644 index 47347b3941..0000000000 --- a/samples/tasks/scenarios/ceilometer/all-list-meters.yaml +++ /dev/null @@ -1,35 +0,0 
@@ ---- - CeilometerMeters.list_meters: - - - runner: - type: constant - times: 10 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 - ceilometer: - counter_name: "rally_meter" - counter_type: "gauge" - counter_unit: "%" - counter_volume: 100 - resources_per_tenant: 100 - samples_per_resource: 100 - timestamp_interval: 10 - metadata_list: - - - status: "active" - name: "rally on" - deleted: "false" - - - status: "terminated" - name: "rally off" - deleted: "true" - args: - limit: 50 - metadata_query: - status: "terminated" - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/ceilometer/all-list-resources.json b/samples/tasks/scenarios/ceilometer/all-list-resources.json deleted file mode 100644 index 43a7bfc8f4..0000000000 --- a/samples/tasks/scenarios/ceilometer/all-list-resources.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "CeilometerResource.list_resources": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "ceilometer": { - "counter_name": "rally_meter", - "counter_type": "gauge", - "counter_unit": "%", - "counter_volume": 100, - "resources_per_tenant": 100, - "samples_per_resource": 100, - "timestamp_interval": 10, - "metadata_list": [ - {"status": "active", "name": "rally on", - "deleted": "false"}, - {"status": "terminated", "name": "rally off", - "deleted": "true"} - ] - } - }, - "args": { - "limit":50, - "metadata_query": {"status": "terminated"} - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} - diff --git a/samples/tasks/scenarios/ceilometer/all-list-resources.yaml b/samples/tasks/scenarios/ceilometer/all-list-resources.yaml deleted file mode 100644 index 2f212b1e0a..0000000000 --- a/samples/tasks/scenarios/ceilometer/all-list-resources.yaml +++ /dev/null @@ -1,35 +0,0 @@ ---- - CeilometerResource.list_resources: - - - runner: - type: "constant" - times: 10 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 - ceilometer: - counter_name: "rally_meter" - counter_type: "gauge" - counter_unit: "%" - counter_volume: 100 - resources_per_tenant: 100 - samples_per_resource: 100 - timestamp_interval: 10 - metadata_list: - - - status: "active" - name: "rally on" - deleted: "false" - - - status: "terminated" - name: "rally off" - deleted: "true" - args: - limit: 50 - metadata_query: - status: "terminated" - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/ceilometer/create-alarm-and-get-history.json b/samples/tasks/scenarios/ceilometer/create-alarm-and-get-history.json deleted file mode 100644 index 450c2dd63f..0000000000 --- a/samples/tasks/scenarios/ceilometer/create-alarm-and-get-history.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "CeilometerAlarms.create_alarm_and_get_history": [ - { - "args": { - "meter_name": "ram_util", - "threshold": 10.0, - "type": "threshold", - "state": "ok", - "statistic": "avg", - "alarm_actions": ["http://localhost:8776/alarm"], - "ok_actions": ["http://localhost:8776/ok"], - "insufficient_data_actions": ["http://localhost:8776/notok"] - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/ceilometer/create-alarm-and-get-history.yaml b/samples/tasks/scenarios/ceilometer/create-alarm-and-get-history.yaml deleted file mode 100644 index a5e534d53e..0000000000 --- 
a/samples/tasks/scenarios/ceilometer/create-alarm-and-get-history.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- - CeilometerAlarms.create_alarm_and_get_history: - - - args: - meter_name: "ram_util" - threshold: 10.0 - type: "threshold" - state: "ok" - statistic: "avg" - alarm_actions: ["http://localhost:8776/alarm"] - ok_actions: ["http://localhost:8776/ok"] - insufficient_data_actions: ["http://localhost:8776/notok"] - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/ceilometer/create-alarm.json b/samples/tasks/scenarios/ceilometer/create-alarm.json deleted file mode 100644 index 8b1ccc9e6e..0000000000 --- a/samples/tasks/scenarios/ceilometer/create-alarm.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "CeilometerAlarms.create_alarm": [ - { - "args": { - "meter_name": "ram_util", - "threshold": 10.0, - "type": "threshold", - "statistic": "avg", - "alarm_actions": ["http://localhost:8776/alarm"], - "ok_actions": ["http://localhost:8776/ok"], - "insufficient_data_actions": ["http://localhost:8776/notok"] - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/ceilometer/create-alarm.yaml b/samples/tasks/scenarios/ceilometer/create-alarm.yaml deleted file mode 100644 index e9c7fe8cb8..0000000000 --- a/samples/tasks/scenarios/ceilometer/create-alarm.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- - CeilometerAlarms.create_alarm: - - - args: - meter_name: "ram_util" - threshold: 10.0 - type: "threshold" - statistic: "avg" - alarm_actions: ["http://localhost:8776/alarm"] - ok_actions: ["http://localhost:8776/ok"] - insufficient_data_actions: ["http://localhost:8776/notok"] - runner: - type: "constant" - times: 10 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/ceilometer/create-and-delete-alarm.json b/samples/tasks/scenarios/ceilometer/create-and-delete-alarm.json deleted file mode 100644 index 41cb993091..0000000000 --- a/samples/tasks/scenarios/ceilometer/create-and-delete-alarm.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "CeilometerAlarms.create_and_delete_alarm": [ - { - "args": { - "meter_name": "ram_util", - "threshold": 10.0, - "type": "threshold", - "statistic": "avg", - "alarm_actions": ["http://localhost:8776/alarm"], - "ok_actions": ["http://localhost:8776/ok"], - "insufficient_data_actions": ["http://localhost:8776/notok"] - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/ceilometer/create-and-delete-alarm.yaml b/samples/tasks/scenarios/ceilometer/create-and-delete-alarm.yaml deleted file mode 100644 index 5c952ed6ad..0000000000 --- a/samples/tasks/scenarios/ceilometer/create-and-delete-alarm.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- - CeilometerAlarms.create_and_delete_alarm: - - - args: - meter_name: "ram_util" - threshold: 10.0 - type: "threshold" - statistic: "avg" - alarm_actions: ["http://localhost:8776/alarm"] - ok_actions: ["http://localhost:8776/ok"] - insufficient_data_actions: ["http://localhost:8776/notok"] - runner: - type: "constant" - times: 10 - concurrency: 1 - context: - 
users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/ceilometer/create-and-get-alarm.json b/samples/tasks/scenarios/ceilometer/create-and-get-alarm.json deleted file mode 100644 index 002789e7ed..0000000000 --- a/samples/tasks/scenarios/ceilometer/create-and-get-alarm.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "CeilometerAlarms.create_and_get_alarm": [ - { - "args": { - "meter_name": "ram_util", - "threshold": 10.0, - "type": "threshold", - "statistic": "avg", - "alarm_actions": ["http://localhost:8776/alarm"], - "ok_actions": ["http://localhost:8776/ok"], - "insufficient_data_actions": ["http://localhost:8776/notok"] - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/ceilometer/create-and-get-alarm.yaml b/samples/tasks/scenarios/ceilometer/create-and-get-alarm.yaml deleted file mode 100644 index 5ad7c024a6..0000000000 --- a/samples/tasks/scenarios/ceilometer/create-and-get-alarm.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- - CeilometerAlarms.create_and_get_alarm: - - - args: - meter_name: "ram_util" - threshold: 10.0 - type: "threshold" - statistic: "avg" - alarm_actions: ["http://localhost:8776/alarm"] - ok_actions: ["http://localhost:8776/ok"] - insufficient_data_actions: ["http://localhost:8776/notok"] - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/ceilometer/create-and-list-alarm.json b/samples/tasks/scenarios/ceilometer/create-and-list-alarm.json deleted file mode 100644 index 27e65d2b9b..0000000000 --- a/samples/tasks/scenarios/ceilometer/create-and-list-alarm.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "CeilometerAlarms.create_and_list_alarm": [ - { - "args": { - "meter_name": "ram_util", - "threshold": 10.0, - "type": "threshold", - "statistic": "avg", - "alarm_actions": ["http://localhost:8776/alarm"], - "ok_actions": ["http://localhost:8776/ok"], - "insufficient_data_actions": ["http://localhost:8776/notok"] - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/ceilometer/create-and-list-alarm.yaml b/samples/tasks/scenarios/ceilometer/create-and-list-alarm.yaml deleted file mode 100644 index 5e53723bae..0000000000 --- a/samples/tasks/scenarios/ceilometer/create-and-list-alarm.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- - CeilometerAlarms.create_and_list_alarm: - - - args: - meter_name: "ram_util" - threshold: 10.0 - type: "threshold" - statistic: "avg" - alarm_actions: ["http://localhost:8776/alarm"] - ok_actions: ["http://localhost:8776/ok"] - insufficient_data_actions: ["http://localhost:8776/notok"] - runner: - type: "constant" - times: 10 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/ceilometer/create-and-query-alarm-history.json b/samples/tasks/scenarios/ceilometer/create-and-query-alarm-history.json deleted file mode 100644 index 074a69cdce..0000000000 --- a/samples/tasks/scenarios/ceilometer/create-and-query-alarm-history.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - 
"CeilometerQueries.create_and_query_alarm_history": [ - { - "args": { - "orderby": null, - "limit": null, - "meter_name": "ram_util", - "threshold": 10.0, - "type": "threshold", - "statistic": "avg", - "alarm_actions": ["http://localhost:8776/alarm"], - "ok_actions": ["http://localhost:8776/ok"], - "insufficient_data_actions": ["http://localhost:8776/notok"] - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/ceilometer/create-and-query-alarm-history.yaml b/samples/tasks/scenarios/ceilometer/create-and-query-alarm-history.yaml deleted file mode 100644 index 9a3ab3de70..0000000000 --- a/samples/tasks/scenarios/ceilometer/create-and-query-alarm-history.yaml +++ /dev/null @@ -1,24 +0,0 @@ ---- - CeilometerQueries.create_and_query_alarm_history: - - - args: - orderby: !!null - limit: !!null - meter_name: "ram_util" - threshold: 10.0 - type: "threshold" - statistic: "avg" - alarm_actions: ["http://localhost:8776/alarm"] - ok_actions: ["http://localhost:8776/ok"] - insufficient_data_actions: ["http://localhost:8776/notok"] - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/ceilometer/create-and-query-alarms.json b/samples/tasks/scenarios/ceilometer/create-and-query-alarms.json deleted file mode 100644 index 4b40fddd08..0000000000 --- a/samples/tasks/scenarios/ceilometer/create-and-query-alarms.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "CeilometerQueries.create_and_query_alarms": [ - { - "args": { - "filter": {"and": [{"!=": {"state": "dummy_state"}},{"=": {"type": "threshold"}}]}, - "orderby": null, - "limit": 10, - "meter_name": "ram_util", - "threshold": 10.0, - "type": "threshold", - "statistic": "avg", - "alarm_actions": ["http://localhost:8776/alarm"], - "ok_actions": ["http://localhost:8776/ok"], - "insufficient_data_actions": ["http://localhost:8776/notok"] - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/ceilometer/create-and-query-alarms.yaml b/samples/tasks/scenarios/ceilometer/create-and-query-alarms.yaml deleted file mode 100644 index 30b7b8d401..0000000000 --- a/samples/tasks/scenarios/ceilometer/create-and-query-alarms.yaml +++ /dev/null @@ -1,25 +0,0 @@ ---- - CeilometerQueries.create_and_query_alarms: - - - args: - filter: {"and": [{"!=": {"state": "dummy_state"}},{"=": {"type": "threshold"}}]} - orderby: !!null - limit: 10 - meter_name: "ram_util" - threshold: 10.0 - type: "threshold" - statistic: "avg" - alarm_actions: ["http://localhost:8776/alarm"] - ok_actions: ["http://localhost:8776/ok"] - insufficient_data_actions: ["http://localhost:8776/notok"] - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/ceilometer/create-and-query-samples.json b/samples/tasks/scenarios/ceilometer/create-and-query-samples.json deleted file mode 100644 index 4ada249bdd..0000000000 --- a/samples/tasks/scenarios/ceilometer/create-and-query-samples.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "CeilometerQueries.create_and_query_samples": [ 
- { - "args": { - "filter": {"=": {"counter_unit": "instance"}}, - "orderby": null, - "limit": 10, - "counter_name": "cpu_util", - "counter_type": "gauge", - "counter_unit": "instance", - "counter_volume": 1.0, - "resource_id": "resource_id" - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/ceilometer/create-and-query-samples.yaml b/samples/tasks/scenarios/ceilometer/create-and-query-samples.yaml deleted file mode 100644 index 9ffacf25da..0000000000 --- a/samples/tasks/scenarios/ceilometer/create-and-query-samples.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- - CeilometerQueries.create_and_query_samples: - - - args: - filter: {"=": {"counter_unit": "instance"}} - orderby: !!null - limit: 10 - counter_name: "cpu_util" - counter_type: "gauge" - counter_unit: "instance" - counter_volume: 1.0 - resource_id: "resource_id" - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/ceilometer/create-and-update-alarm.json b/samples/tasks/scenarios/ceilometer/create-and-update-alarm.json deleted file mode 100644 index 2d807d88c9..0000000000 --- a/samples/tasks/scenarios/ceilometer/create-and-update-alarm.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "CeilometerAlarms.create_and_update_alarm": [ - { - "args": { - "meter_name": "ram_util", - "threshold": 10.0, - "type": "threshold", - "statistic": "avg", - "alarm_actions": ["http://localhost:8776/alarm"], - "ok_actions": ["http://localhost:8776/ok"], - "insufficient_data_actions": ["http://localhost:8776/notok"] - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/ceilometer/create-and-update-alarm.yaml b/samples/tasks/scenarios/ceilometer/create-and-update-alarm.yaml deleted file mode 100644 index f7a4856326..0000000000 --- a/samples/tasks/scenarios/ceilometer/create-and-update-alarm.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- - CeilometerAlarms.create_and_update_alarm: - - - args: - meter_name: "ram_util" - threshold: 10.0 - type: "threshold" - statistic: "avg" - alarm_actions: ["http://localhost:8776/alarm"] - ok_actions: ["http://localhost:8776/ok"] - insufficient_data_actions: ["http://localhost:8776/notok"] - runner: - type: "constant" - times: 10 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/ceilometer/create-meter-and-get-stats.json b/samples/tasks/scenarios/ceilometer/create-meter-and-get-stats.json deleted file mode 100644 index bc04e12a91..0000000000 --- a/samples/tasks/scenarios/ceilometer/create-meter-and-get-stats.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "CeilometerStats.create_meter_and_get_stats": [ - { - "args": { - "user_id": "user-id", - "resource_id": "resource-id", - "counter_volume": 1.0, - "counter_unit": "", - "counter_type": "cumulative" - }, - "runner": { - "type": "constant", - "times": 200, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git 
a/samples/tasks/scenarios/ceilometer/create-meter-and-get-stats.yaml b/samples/tasks/scenarios/ceilometer/create-meter-and-get-stats.yaml deleted file mode 100644 index 5b0c957832..0000000000 --- a/samples/tasks/scenarios/ceilometer/create-meter-and-get-stats.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - CeilometerStats.create_meter_and_get_stats: - - - args: - user_id: "user-id" - resource_id: "resource-id" - counter_volume: 1.0 - counter_unit: "" - counter_type: "cumulative" - runner: - type: "constant" - times: 200 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/ceilometer/create-user-and-get-event.json b/samples/tasks/scenarios/ceilometer/create-user-and-get-event.json deleted file mode 100644 index d8519a3c44..0000000000 --- a/samples/tasks/scenarios/ceilometer/create-user-and-get-event.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "CeilometerEvents.create_user_and_get_event": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/ceilometer/create-user-and-get-event.yaml b/samples/tasks/scenarios/ceilometer/create-user-and-get-event.yaml deleted file mode 100644 index d2cc1b43fc..0000000000 --- a/samples/tasks/scenarios/ceilometer/create-user-and-get-event.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - CeilometerEvents.create_user_and_get_event: - - - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/ceilometer/create-user-and-list-event-types.json b/samples/tasks/scenarios/ceilometer/create-user-and-list-event-types.json deleted file mode 100644 index fe6a639916..0000000000 --- a/samples/tasks/scenarios/ceilometer/create-user-and-list-event-types.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "CeilometerEvents.create_user_and_list_event_types": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/ceilometer/create-user-and-list-event-types.yaml b/samples/tasks/scenarios/ceilometer/create-user-and-list-event-types.yaml deleted file mode 100644 index 79b8e17364..0000000000 --- a/samples/tasks/scenarios/ceilometer/create-user-and-list-event-types.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - CeilometerEvents.create_user_and_list_event_types: - - - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/ceilometer/create-user-and-list-events.json b/samples/tasks/scenarios/ceilometer/create-user-and-list-events.json deleted file mode 100644 index 9b3ad2ac0d..0000000000 --- a/samples/tasks/scenarios/ceilometer/create-user-and-list-events.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "CeilometerEvents.create_user_and_list_events": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/ceilometer/create-user-and-list-events.yaml 
b/samples/tasks/scenarios/ceilometer/create-user-and-list-events.yaml deleted file mode 100644 index 7d9ba18615..0000000000 --- a/samples/tasks/scenarios/ceilometer/create-user-and-list-events.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - CeilometerEvents.create_user_and_list_events: - - - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/ceilometer/create-user-and-list-trait-descriptions.json b/samples/tasks/scenarios/ceilometer/create-user-and-list-trait-descriptions.json deleted file mode 100644 index b5dce2a670..0000000000 --- a/samples/tasks/scenarios/ceilometer/create-user-and-list-trait-descriptions.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "CeilometerTraits.create_user_and_list_trait_descriptions": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/ceilometer/create-user-and-list-trait-descriptions.yaml b/samples/tasks/scenarios/ceilometer/create-user-and-list-trait-descriptions.yaml deleted file mode 100644 index a7b4bc7e26..0000000000 --- a/samples/tasks/scenarios/ceilometer/create-user-and-list-trait-descriptions.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - CeilometerTraits.create_user_and_list_trait_descriptions: - - - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/ceilometer/create-user-and-list-traits.json b/samples/tasks/scenarios/ceilometer/create-user-and-list-traits.json deleted file mode 100644 index 39a42196f0..0000000000 --- a/samples/tasks/scenarios/ceilometer/create-user-and-list-traits.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "CeilometerTraits.create_user_and_list_traits": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/ceilometer/create-user-and-list-traits.yaml b/samples/tasks/scenarios/ceilometer/create-user-and-list-traits.yaml deleted file mode 100644 index 95dfc84aac..0000000000 --- a/samples/tasks/scenarios/ceilometer/create-user-and-list-traits.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - CeilometerTraits.create_user_and_list_traits: - - - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/ceilometer/get-stats.json b/samples/tasks/scenarios/ceilometer/get-stats.json deleted file mode 100644 index 73b0270493..0000000000 --- a/samples/tasks/scenarios/ceilometer/get-stats.json +++ /dev/null @@ -1,47 +0,0 @@ -{ - "CeilometerStats.get_stats": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "ceilometer": { - "counter_name": "rally_meter", - "counter_type": "gauge", - "counter_unit": "%", - "counter_volume": 100, - "resources_per_tenant": 100, - "samples_per_resource": 100, - "timestamp_interval": 10, - "metadata_list": [ - {"status": "active", "name": "rally on", - "deleted": "false"}, - {"status": "terminated", "name": "rally off", - "deleted": "true"} - ] - } - }, - 
"args": { - "meter_name": "rally_meter", - "filter_by_user_id": true, - "filter_by_project_id": true, - "filter_by_resource_id": true, - "metadata_query": {"status": "terminated"}, - "period": 300, - "groupby": "resource_id" - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} - diff --git a/samples/tasks/scenarios/ceilometer/get-stats.yaml b/samples/tasks/scenarios/ceilometer/get-stats.yaml deleted file mode 100644 index 731021ad50..0000000000 --- a/samples/tasks/scenarios/ceilometer/get-stats.yaml +++ /dev/null @@ -1,40 +0,0 @@ ---- - CeilometerStats.get_stats: - - - runner: - type: constant - times: 10 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 - ceilometer: - counter_name: "rally_meter" - counter_type: "gauge" - counter_unit: "%" - counter_volume: 100 - resources_per_tenant: 100 - samples_per_resource: 100 - timestamp_interval: 10 - metadata_list: - - - status: "active" - name: "rally on" - deleted: "false" - - - status: "terminated" - name: "rally off" - deleted: "true" - args: - meter_name: "rally_meter" - filter_by_user_id: true - filter_by_project_id: true - filter_by_resource_id: true - metadata_query: - status: "terminated" - period: 300 - groupby: "resource_id" - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/ceilometer/get-tenant-resources.json b/samples/tasks/scenarios/ceilometer/get-tenant-resources.json deleted file mode 100644 index 0b18849ef8..0000000000 --- a/samples/tasks/scenarios/ceilometer/get-tenant-resources.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "CeilometerResource.get_tenant_resources": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "ceilometer": { - "counter_name": "cpu_util", - "counter_type": "gauge", - "counter_unit": "instance", - "counter_volume": 1.0 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/ceilometer/get-tenant-resources.yaml b/samples/tasks/scenarios/ceilometer/get-tenant-resources.yaml deleted file mode 100644 index 7515738e88..0000000000 --- a/samples/tasks/scenarios/ceilometer/get-tenant-resources.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - CeilometerResource.get_tenant_resources: - - - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 2 - ceilometer: - counter_name: "cpu_util" - counter_type: "gauge" - counter_volume: 1.0 - counter_unit: "instance" - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/ceilometer/list-alarms.json b/samples/tasks/scenarios/ceilometer/list-alarms.json deleted file mode 100644 index 19059852ac..0000000000 --- a/samples/tasks/scenarios/ceilometer/list-alarms.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "CeilometerAlarms.list_alarms": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} - diff --git a/samples/tasks/scenarios/ceilometer/list-alarms.yaml b/samples/tasks/scenarios/ceilometer/list-alarms.yaml deleted file mode 100644 index b897a54d21..0000000000 --- a/samples/tasks/scenarios/ceilometer/list-alarms.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - CeilometerAlarms.list_alarms: - - - runner: - type: "constant" - times: 10 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git 
a/samples/tasks/scenarios/ceilometer/list-matched-samples.json b/samples/tasks/scenarios/ceilometer/list-matched-samples.json deleted file mode 100644 index 4966741737..0000000000 --- a/samples/tasks/scenarios/ceilometer/list-matched-samples.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "CeilometerSamples.list_matched_samples": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "ceilometer": { - "counter_name": "cpu_util", - "counter_type": "gauge", - "counter_unit": "instance", - "counter_volume": 1.0, - "resources_per_tenant": 100, - "samples_per_resource": 100, - "timestamp_interval": 60, - "metadata_list": [ - {"status": "active", "name": "fake_resource", - "deleted": "False", - "created_at": "2015-09-04T12:34:19.000000"}, - {"status": "not_active", "name": "fake_resource_1", - "deleted": "False", - "created_at": "2015-09-10T06:55:12.000000"} - ] - } - }, - "args":{ - "filter_by_user_id": true, - "filter_by_project_id": true, - "filter_by_resource_id": true, - "limit": 50, - "metadata_query": {"status": "not_active"} - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/ceilometer/list-matched-samples.yaml b/samples/tasks/scenarios/ceilometer/list-matched-samples.yaml deleted file mode 100644 index de4d1f5065..0000000000 --- a/samples/tasks/scenarios/ceilometer/list-matched-samples.yaml +++ /dev/null @@ -1,38 +0,0 @@ ---- - CeilometerSamples.list_matched_samples: - - - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - ceilometer: - counter_name: "cpu_util" - counter_type: "gauge" - counter_unit: "instance" - counter_volume: 1.0 - resources_per_tenant: 100 - samples_per_resource: 100 - timestamp_interval: 60 - metadata_list: - - status: "active" - name: "fake_resource" - deleted: "False" - created_at: "2015-09-04T12:34:19.000000" - - status: "not_active" - name: "fake_resource_1" - deleted: "False" - created_at: "2015-09-10T06:55:12.000000" - args: - limit: 50 - filter_by_user_id: true - filter_by_project_id: true - filter_by_resource_id: true - metadata_query: - status: "not_active" - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/ceilometer/list-meters.json b/samples/tasks/scenarios/ceilometer/list-meters.json deleted file mode 100644 index 1b8c877c0d..0000000000 --- a/samples/tasks/scenarios/ceilometer/list-meters.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "CeilometerMeters.list_matched_meters": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "ceilometer": { - "counter_name": "rally_meter", - "counter_type": "gauge", - "counter_unit": "%", - "counter_volume": 100, - "resources_per_tenant": 100, - "samples_per_resource": 100, - "timestamp_interval": 10, - "metadata_list": [ - {"status": "active", "name": "rally on", - "deleted": "false"}, - {"status": "terminated", "name": "rally off", - "deleted": "true"} - ] - } - }, - "args": { - "filter_by_user_id": true, - "filter_by_project_id": true, - "filter_by_resource_id": true, - "limit": 50, - "metadata_query": {"status": "terminated"} - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} - - diff --git a/samples/tasks/scenarios/ceilometer/list-meters.yaml b/samples/tasks/scenarios/ceilometer/list-meters.yaml deleted file mode 100644 index 534388d5b5..0000000000 --- 
a/samples/tasks/scenarios/ceilometer/list-meters.yaml +++ /dev/null @@ -1,38 +0,0 @@ ---- - CeilometerMeters.list_matched_meters: - - - runner: - type: constant - times: 10 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 - ceilometer: - counter_name: "rally_meter" - counter_type: "gauge" - counter_unit: "%" - counter_volume: 100 - resources_per_tenant: 100 - samples_per_resource: 100 - timestamp_interval: 10 - metadata_list: - - - status: "active" - name: "rally on" - deleted: "false" - - - status: "terminated" - name: "rally off" - deleted: "true" - args: - limit: 50 - filter_by_user_id: true - filter_by_project_id: true - filter_by_resource_id: true - metadata_query: - status: "terminated" - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/ceilometer/list-resources.json b/samples/tasks/scenarios/ceilometer/list-resources.json deleted file mode 100644 index d1e4741157..0000000000 --- a/samples/tasks/scenarios/ceilometer/list-resources.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "CeilometerResource.list_matched_resources": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "ceilometer": { - "counter_name": "rally_meter", - "counter_type": "gauge", - "counter_unit": "%", - "counter_volume": 100, - "resources_per_tenant": 100, - "samples_per_resource": 100, - "timestamp_interval": 10, - "metadata_list": [ - {"status": "active", "name": "rally on", - "deleted": "false"}, - {"status": "terminated", "name": "rally off", - "deleted": "true"} - ] - } - }, - "args": { - "limit":50, - "metadata_query": {"status": "terminated"}, - "filter_by_user_id": true, - "filter_by_project_id": true - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} - diff --git a/samples/tasks/scenarios/ceilometer/list-resources.yaml b/samples/tasks/scenarios/ceilometer/list-resources.yaml deleted file mode 100644 index ca2cccb57c..0000000000 --- a/samples/tasks/scenarios/ceilometer/list-resources.yaml +++ /dev/null @@ -1,37 +0,0 @@ ---- - CeilometerResource.list_matched_resources: - - - runner: - type: "constant" - times: 10 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 - ceilometer: - counter_name: "rally_meter" - counter_type: "gauge" - counter_unit: "%" - counter_volume: 100 - resources_per_tenant: 100 - samples_per_resource: 100 - timestamp_interval: 10 - metadata_list: - - - status: "active" - name: "rally on" - deleted: "false" - - - status: "terminated" - name: "rally off" - deleted: "true" - args: - limit: 50 - filter_by_user_id: true - filter_by_project_id: true - metadata_query: - status: "terminated" - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/ceilometer/list-samples.json b/samples/tasks/scenarios/ceilometer/list-samples.json deleted file mode 100644 index 4320f2aa41..0000000000 --- a/samples/tasks/scenarios/ceilometer/list-samples.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "CeilometerSamples.list_samples": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "ceilometer": { - "counter_name": "cpu_util", - "counter_type": "gauge", - "counter_unit": "instance", - "counter_volume": 1.0, - "resources_per_tenant": 100, - "samples_per_resource": 100, - "timestamp_interval": 60, - "metadata_list": [ - {"status": "active", "name": "fake_resource", - "deleted": "False", - "created_at": "2015-09-04T12:34:19.000000"}, - 
{"status": "not_active", "name": "fake_resource_1", - "deleted": "False", - "created_at": "2015-09-10T06:55:12.000000"} - ], - "batch_size": 5 - } - }, - "args":{ - "limit": 50, - "metadata_query": {"status": "not_active"} - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/ceilometer/list-samples.yaml b/samples/tasks/scenarios/ceilometer/list-samples.yaml deleted file mode 100644 index 51df00a38f..0000000000 --- a/samples/tasks/scenarios/ceilometer/list-samples.yaml +++ /dev/null @@ -1,36 +0,0 @@ ---- - CeilometerSamples.list_samples: - - - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - ceilometer: - counter_name: "cpu_util" - counter_type: "gauge" - counter_unit: "instance" - counter_volume: 1.0 - resources_per_tenant: 100 - samples_per_resource: 100 - timestamp_interval: 60 - metadata_list: - - status: "active" - name: "fake_resource" - deleted: "False" - created_at: "2015-09-04T12:34:19.000000" - - status: "not_active" - name: "fake_resource_1" - deleted: "False" - created_at: "2015-09-10T06:55:12.000000" - batch_size: 5 - args: - limit: 50 - metadata_query: - status: "not_active" - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/creat-qos-and-associate-type.json b/samples/tasks/scenarios/cinder/creat-qos-and-associate-type.json deleted file mode 100644 index 6c7d6371bb..0000000000 --- a/samples/tasks/scenarios/cinder/creat-qos-and-associate-type.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "CinderQos.create_qos_associate_and_disassociate_type": [ - { - "args": { - "consumer": "both", - "write_iops_sec": "10", - "read_iops_sec": "1000" - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "volume_types": [ - "test_type1", - "test_type2", - "test_type3", - "test_type4", - "test_type5" - ] - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/creat-qos-and-associate-type.yaml b/samples/tasks/scenarios/cinder/creat-qos-and-associate-type.yaml deleted file mode 100644 index ae5bf263f4..0000000000 --- a/samples/tasks/scenarios/cinder/creat-qos-and-associate-type.yaml +++ /dev/null @@ -1,25 +0,0 @@ ---- - CinderQos.create_qos_associate_and_disassociate_type: - - - args: - consumer: "both" - write_iops_sec: "10" - read_iops_sec: "1000" - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - volume_types: [ - "test_type1", - "test_type2", - "test_type3", - "test_type4", - "test_type5", - ] - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/create-and-accept-transfer.json b/samples/tasks/scenarios/cinder/create-and-accept-transfer.json deleted file mode 100644 index 339fcf3bdc..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-accept-transfer.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "CinderVolumes.create_and_accept_transfer": [ - { - "args": { - "size": 1 - }, - "runner": { - "type": "constant", - "times": 3, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-and-accept-transfer.yaml b/samples/tasks/scenarios/cinder/create-and-accept-transfer.yaml deleted file mode 100644 index 32a15eaff1..0000000000 --- 
a/samples/tasks/scenarios/cinder/create-and-accept-transfer.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - CinderVolumes.create_and_accept_transfer: - - - args: - size: 1 - runner: - type: "constant" - times: 3 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/create-and-attach-volume.json b/samples/tasks/scenarios/cinder/create-and-attach-volume.json deleted file mode 100644 index bd65b1e97d..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-attach-volume.json +++ /dev/null @@ -1,69 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set availability_zone = availability_zone or "nova" %} -{ - "CinderVolumes.create_and_attach_volume": [ - { - "args": { - "size": 10, - "image": { - "name": "^cirros.*-disk$" - }, - "flavor": { - "name": "{{flavor_name}}" - }, - "create_volume_params": { - "availability_zone": "{{availability_zone}}" - } - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - }, - { - "args": { - "size": { - "min": 1, - "max": 5 - }, - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "create_volume_params": { - "availability_zone": "{{availability_zone}}" - } - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-and-attach-volume.yaml b/samples/tasks/scenarios/cinder/create-and-attach-volume.yaml deleted file mode 100644 index 17608f2b7d..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-attach-volume.yaml +++ /dev/null @@ -1,46 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set availability_zone = availability_zone or "nova" %} ---- - CinderVolumes.create_and_attach_volume: - - - args: - size: 10 - image: - name: "^cirros.*-disk$" - flavor: - name: "{{flavor_name}}" - create_volume_params: - availability_zone: "{{availability_zone}}" - runner: - type: "constant" - times: 5 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - - args: - size: - min: 1 - max: 5 - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - create_volume_params: - availability_zone: "{{availability_zone}}" - runner: - type: "constant" - times: 5 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/create-and-delete-encryption-type.json b/samples/tasks/scenarios/cinder/create-and-delete-encryption-type.json deleted file mode 100644 index 46edc50adb..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-delete-encryption-type.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "CinderVolumeTypes.create_and_delete_encryption_type": [ - { - "args": { - "provider": "LuksEncryptor", - "cipher": "aes-xts-plain64", - "key_size": 512, - "control_location": "front-end" - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "volume_types": [ - "test_type1", - "test_type2", - "test_type3", - "test_type4" - ] - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git 
a/samples/tasks/scenarios/cinder/create-and-delete-encryption-type.yaml b/samples/tasks/scenarios/cinder/create-and-delete-encryption-type.yaml deleted file mode 100644 index 1357880b85..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-delete-encryption-type.yaml +++ /dev/null @@ -1,24 +0,0 @@ - CinderVolumeTypes.create_and_delete_encryption_type: - - - args: - provider: "LuksEncryptor" - cipher: "aes-xts-plain64" - key_size: 512 - control_location: "front-end" - runner: - type: "constant" - times: 4 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 - volume_types: [ - "test_type1", - "test_type2", - "test_type3", - "test_type4" - ] - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/create-and-delete-snapshot.json b/samples/tasks/scenarios/cinder/create-and-delete-snapshot.json deleted file mode 100644 index 14c0c13fc1..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-delete-snapshot.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "CinderVolumes.create_and_delete_snapshot": [ - { - "args": { - "force": false - }, - "runner": { - "type": "constant", - "times": 3, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "volumes": { - "size": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-and-delete-snapshot.yaml b/samples/tasks/scenarios/cinder/create-and-delete-snapshot.yaml deleted file mode 100644 index c79e691d18..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-delete-snapshot.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- - CinderVolumes.create_and_delete_snapshot: - - - args: - force: false - runner: - type: "constant" - times: 3 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - volumes: - size: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/create-and-delete-volume-type.json b/samples/tasks/scenarios/cinder/create-and-delete-volume-type.json deleted file mode 100644 index 7514b7c8b7..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-delete-volume-type.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "CinderVolumeTypes.create_and_delete_volume_type": [ - { - "args": { - "description": "rally tests creating types" - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-and-delete-volume-type.yaml b/samples/tasks/scenarios/cinder/create-and-delete-volume-type.yaml deleted file mode 100644 index 225a7c9dae..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-delete-volume-type.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - CinderVolumeTypes.create_and_delete_volume_type: - - - args: - description: "rally tests creating types" - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/create-and-delete-volume.json b/samples/tasks/scenarios/cinder/create-and-delete-volume.json deleted file mode 100644 index 5750980ec9..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-delete-volume.json +++ /dev/null @@ -1,49 +0,0 @@ -{ - "CinderVolumes.create_and_delete_volume": [ - { - "args": { - "size": 1 - }, - "runner": { - "type": "constant", - "times": 3, - "concurrency": 2 - }, - "context": { - "users": { - 
"tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - }, - { - "args": { - "size": { - "min": 1, - "max": 5 - } - }, - "runner": { - "type": "constant", - "times": 3, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-and-delete-volume.yaml b/samples/tasks/scenarios/cinder/create-and-delete-volume.yaml deleted file mode 100644 index 036d874d85..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-delete-volume.yaml +++ /dev/null @@ -1,32 +0,0 @@ ---- - CinderVolumes.create_and_delete_volume: - - - args: - size: 1 - runner: - type: "constant" - times: 3 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - - args: - size: - min: 1 - max: 5 - runner: - type: "constant" - times: 3 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/create-and-extend-volume.json b/samples/tasks/scenarios/cinder/create-and-extend-volume.json deleted file mode 100644 index 354657369d..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-extend-volume.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "CinderVolumes.create_and_extend_volume": [ - { - "args": { - "size": 1, - "new_size": 2 - }, - "runner": { - "type": "constant", - "times": 2, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - }, - { - "args": { - "size": { - "min": 1, - "max": 5 - }, - "new_size": { - "min": 6, - "max": 10 - } - }, - "runner": { - "type": "constant", - "times": 2, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-and-extend-volume.yaml b/samples/tasks/scenarios/cinder/create-and-extend-volume.yaml deleted file mode 100644 index c6b2bb7f12..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-extend-volume.yaml +++ /dev/null @@ -1,36 +0,0 @@ ---- - CinderVolumes.create_and_extend_volume: - - - args: - size: 1 - new_size: 2 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - - args: - size: - min: 1 - max: 5 - new_size: - min: 6 - max: 10 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/create-and-get-qos.json b/samples/tasks/scenarios/cinder/create-and-get-qos.json deleted file mode 100644 index 6a10f11134..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-get-qos.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "CinderQos.create_and_get_qos": [ - { - "args": { - "consumer": "both", - "write_iops_sec": "10", - "read_iops_sec": "1000" - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-and-get-qos.yaml b/samples/tasks/scenarios/cinder/create-and-get-qos.yaml deleted file mode 100644 index 1ba83b4976..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-get-qos.yaml 
+++ /dev/null @@ -1,18 +0,0 @@ ---- - CinderQos.create_and_get_qos: - - - args: - consumer: "both" - write_iops_sec: "10" - read_iops_sec: "1000" - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/create-and-get-volume-type.json b/samples/tasks/scenarios/cinder/create-and-get-volume-type.json deleted file mode 100644 index 269023f26f..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-get-volume-type.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "CinderVolumeTypes.create_and_get_volume_type": [ - { - "args": { - "description": "rally tests creating types" - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-and-get-volume-type.yaml b/samples/tasks/scenarios/cinder/create-and-get-volume-type.yaml deleted file mode 100644 index 8aad5bcc3c..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-get-volume-type.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - CinderVolumeTypes.create_and_get_volume_type: - - - args: - description: "rally tests creating types" - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/create-and-get-volume.json b/samples/tasks/scenarios/cinder/create-and-get-volume.json deleted file mode 100644 index 1825b876e7..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-get-volume.json +++ /dev/null @@ -1,49 +0,0 @@ -{ - "CinderVolumes.create_and_get_volume": [ - { - "args": { - "size": 1 - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - }, - { - "args": { - "size": { - "min": 1, - "max": 5 - } - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-and-get-volume.yaml b/samples/tasks/scenarios/cinder/create-and-get-volume.yaml deleted file mode 100644 index f320e95821..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-get-volume.yaml +++ /dev/null @@ -1,32 +0,0 @@ ---- - CinderVolumes.create_and_get_volume: - - - args: - size: 1 - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - - args: - size: - min: 1 - max: 5 - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/create-and-list-encryption-type.json b/samples/tasks/scenarios/cinder/create-and-list-encryption-type.json deleted file mode 100644 index 0233c4a2cc..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-list-encryption-type.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "CinderVolumeTypes.create_and_list_encryption_type": [ - { - "args": { - "provider": "LuksEncryptor", - "cipher": "aes-xts-plain64", - "key_size": 512, - "control_location": "front-end" - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 1 - 
}, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "volume_types": [ - "test_type1", - "test_type2", - "test_type3", - "test_type4" - ] - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-and-list-encryption-type.yaml b/samples/tasks/scenarios/cinder/create-and-list-encryption-type.yaml deleted file mode 100644 index 193006e28c..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-list-encryption-type.yaml +++ /dev/null @@ -1,24 +0,0 @@ - CinderVolumeTypes.create_and_list_encryption_type: - - - args: - provider: "LuksEncryptor" - cipher: "aes-xts-plain64" - key_size: 512 - control_location: "front-end" - runner: - type: "constant" - times: 4 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 - volume_types: [ - "test_type1", - "test_type2", - "test_type3", - "test_type4" - ] - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/create-and-list-qos.json b/samples/tasks/scenarios/cinder/create-and-list-qos.json deleted file mode 100644 index e48d4a8c40..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-list-qos.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "CinderQos.create_and_list_qos": [ - { - "args": { - "consumer": "both", - "write_iops_sec": "10", - "read_iops_sec": "1000" - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-and-list-qos.yaml b/samples/tasks/scenarios/cinder/create-and-list-qos.yaml deleted file mode 100644 index 294199a0f1..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-list-qos.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- - CinderQos.create_and_list_qos: - - - args: - consumer: "both" - write_iops_sec: "10" - read_iops_sec: "1000" - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/create-and-list-snapshots.json b/samples/tasks/scenarios/cinder/create-and-list-snapshots.json deleted file mode 100644 index 10e8fd2e73..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-list-snapshots.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "CinderVolumes.create_and_list_snapshots": [ - { - "args": { - "force": false, - "detailed": true - }, - "runner": { - "type": "constant", - "times": 2, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "volumes": { - "size": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-and-list-snapshots.yaml b/samples/tasks/scenarios/cinder/create-and-list-snapshots.yaml deleted file mode 100644 index a8ec579bd3..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-list-snapshots.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - CinderVolumes.create_and_list_snapshots: - - - args: - force: False - detailed: True - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - volumes: - size: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/create-and-list-volume-backups.json b/samples/tasks/scenarios/cinder/create-and-list-volume-backups.json deleted file mode 100644 index 934402f638..0000000000 --- 
a/samples/tasks/scenarios/cinder/create-and-list-volume-backups.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "CinderVolumes.create_and_list_volume_backups": [ - { - "args": { - "size": 1, - "detailed": true, - "do_delete": true, - "create_volume_kwargs": {}, - "create_backup_kwargs": {} - }, - "runner": { - "type": "constant", - "times": 2, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "roles": ["Member"] - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-and-list-volume-backups.yaml b/samples/tasks/scenarios/cinder/create-and-list-volume-backups.yaml deleted file mode 100644 index be8cd868ed..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-list-volume-backups.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- - CinderVolumes.create_and_list_volume_backups: - - - args: - size: 1 - detailed: True - do_delete: True - create_volume_kwargs: {} - create_backup_kwargs: {} - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - roles: - - "Member" - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/create-and-list-volume-types.json b/samples/tasks/scenarios/cinder/create-and-list-volume-types.json deleted file mode 100644 index 80d70f2e86..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-list-volume-types.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "CinderVolumeTypes.create_and_list_volume_types": [ - { - "args": { - "description": "rally tests creating types" - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-and-list-volume-types.yaml b/samples/tasks/scenarios/cinder/create-and-list-volume-types.yaml deleted file mode 100644 index 7c6edad5cd..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-list-volume-types.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - CinderVolumeTypes.create_and_list_volume_types: - - - args: - description: "rally tests creating types" - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/create-and-list-volume.json b/samples/tasks/scenarios/cinder/create-and-list-volume.json deleted file mode 100644 index 82117f8389..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-list-volume.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "CinderVolumes.create_and_list_volume": [ - { - "args": { - "size": 1, - "detailed": true - }, - "runner": { - "type": "constant", - "times": 3, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - }, - { - "args": { - "size": { - "min": 1, - "max": 5 - }, - "detailed": true - }, - "runner": { - "type": "constant", - "times": 3, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-and-list-volume.yaml b/samples/tasks/scenarios/cinder/create-and-list-volume.yaml deleted file mode 100644 index 7876fa9ff4..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-list-volume.yaml +++ /dev/null @@ -1,34 +0,0 @@ ---- - 
CinderVolumes.create_and_list_volume: - - - args: - size: 1 - detailed: True - runner: - type: "constant" - times: 3 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - - args: - size: - min: 1 - max: 5 - detailed: True - runner: - type: "constant" - times: 3 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/create-and-restore-volume-backup.json b/samples/tasks/scenarios/cinder/create-and-restore-volume-backup.json deleted file mode 100644 index d7070486b5..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-restore-volume-backup.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "CinderVolumes.create_and_restore_volume_backup": [ - { - "args": { - "size": 1, - "do_delete": true, - "create_volume_kwargs": {}, - "create_backup_kwargs": {} - }, - "runner": { - "type": "constant", - "times": 2, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "roles": ["Member"] - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-and-restore-volume-backup.yaml b/samples/tasks/scenarios/cinder/create-and-restore-volume-backup.yaml deleted file mode 100644 index 0380f3448b..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-restore-volume-backup.yaml +++ /dev/null @@ -1,21 +0,0 @@ ---- - CinderVolumes.create_and_restore_volume_backup: - - - args: - size: 1 - do_delete: True - create_volume_kwargs: {} - create_backup_kwargs: {} - runner: - type: "constant" - times: 2 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - roles: - - "Member" - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/create-and-set-qos.json b/samples/tasks/scenarios/cinder/create-and-set-qos.json deleted file mode 100644 index b206e1336c..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-set-qos.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "CinderQos.create_and_set_qos": [ - { - "args": { - "consumer": "back-end", - "write_iops_sec": "10", - "read_iops_sec": "1000", - "set_consumer": "both", - "set_write_iops_sec": "11", - "set_read_iops_sec": "1001" - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-and-set-qos.yaml b/samples/tasks/scenarios/cinder/create-and-set-qos.yaml deleted file mode 100644 index 285b6472d4..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-set-qos.yaml +++ /dev/null @@ -1,21 +0,0 @@ ---- - CinderQos.create_and_set_qos: - - - args: - consumer: "back-end" - write_iops_sec: "10" - read_iops_sec: "1000" - set_consumer: "both" - set_write_iops_sec: "11" - set_read_iops_sec: "1001" - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/create-and-set-volume-type-keys.json b/samples/tasks/scenarios/cinder/create-and-set-volume-type-keys.json deleted file mode 100644 index b99b051b06..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-set-volume-type-keys.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "CinderVolumeTypes.create_and_set_volume_type_keys": [ - { - "args": { - "description": "rally tests creating types", - "volume_type_key": { - 
"volume_backend_name": "LVM_iSCSI" - } - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-and-set-volume-type-keys.yaml b/samples/tasks/scenarios/cinder/create-and-set-volume-type-keys.yaml deleted file mode 100644 index a95e3d62f7..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-set-volume-type-keys.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- - CinderVolumeTypes.create_and_set_volume_type_keys: - - - args: - description: "rally tests creating types" - volume_type_key: - volume_backend_name: "LVM_iSCSI" - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/create-and-update-encryption-type.json b/samples/tasks/scenarios/cinder/create-and-update-encryption-type.json deleted file mode 100644 index 8f1368a479..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-update-encryption-type.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - "CinderVolumeTypes.create_and_update_encryption_type": [ - { - "args": { - "create_provider": "LuksEncryptor", - "create_cipher": "aes-xts-plain64", - "create_key_size": 512, - "create_control_location": "front-end", - "update_provider": "CryptsetupEncryptor", - "update_cipher": "aes-xts-plain", - "update_key_size": 256, - "update_control_location": "back-end" - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "volume_types": [ - "test_type1", - "test_type2", - "test_type3", - "test_type4" - ] - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-and-update-encryption-type.yaml b/samples/tasks/scenarios/cinder/create-and-update-encryption-type.yaml deleted file mode 100644 index 161b71ed39..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-update-encryption-type.yaml +++ /dev/null @@ -1,28 +0,0 @@ - CinderVolumeTypes.create_and_update_encryption_type: - - - args: - create_provider: "LuksEncryptor" - create_cipher: "aes-xts-plain64" - create_key_size: 512 - create_control_location: "front-end" - update_provider: "CryptsetupEncryptor" - update_cipher: "aes-xts-plain" - update_key_size: 256 - update_control_location: "back-end" - runner: - type: "constant" - times: 4 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 - volume_types: [ - "test_type1", - "test_type2", - "test_type3", - "test_type4" - ] - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/create-and-update-readonly-flag.json b/samples/tasks/scenarios/cinder/create-and-update-readonly-flag.json deleted file mode 100644 index 8ed6a5fac1..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-update-readonly-flag.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "CinderVolumes.create_volume_and_update_readonly_flag": [ - { - "args": { - "size": 1, - "read_only": true - }, - "runner": { - "type": "constant", - "times": 3, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-and-update-readonly-flag.yaml b/samples/tasks/scenarios/cinder/create-and-update-readonly-flag.yaml deleted file mode 
100644 index f8edb26a39..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-update-readonly-flag.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - CinderVolumes.create_volume_and_update_readonly_flag: - - - args: - size: 1 - read_only: true - runner: - type: "constant" - times: 3 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/create-and-update-volume-type.json b/samples/tasks/scenarios/cinder/create-and-update-volume-type.json deleted file mode 100644 index f5a95e6d74..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-update-volume-type.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "CinderVolumeTypes.create_and_update_volume_type": [ - { - "args": { - "description": "test", - "update_description": "test update" - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-and-update-volume-type.yaml b/samples/tasks/scenarios/cinder/create-and-update-volume-type.yaml deleted file mode 100644 index 8c09cbf96c..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-update-volume-type.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - CinderVolumeTypes.create_and_update_volume_type: - - - args: - description: "test" - update_description: "test update" - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/create-and-update-volume.json b/samples/tasks/scenarios/cinder/create-and-update-volume.json deleted file mode 100644 index cd8650604d..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-update-volume.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "CinderVolumes.create_and_update_volume": [ - { - "args": { - "create_volume_kwargs": {}, - "update_volume_kwargs": { - "description": "desc_updated" - }, - "size": 1 - }, - "runner": { - "type": "constant", - "times": 3, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-and-update-volume.yaml b/samples/tasks/scenarios/cinder/create-and-update-volume.yaml deleted file mode 100644 index 40f9359a2f..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-update-volume.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - CinderVolumes.create_and_update_volume: - - - args: - update_volume_kwargs: - description: "desc_updated" - create_volume_kwargs: {} - size: 1 - runner: - type: "constant" - times: 3 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/create-and-upload-volume-to-image.json b/samples/tasks/scenarios/cinder/create-and-upload-volume-to-image.json deleted file mode 100644 index 148533be92..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-upload-volume-to-image.json +++ /dev/null @@ -1,63 +0,0 @@ -{ - "CinderVolumes.create_and_upload_volume_to_image": [ - { - "args": { - "size": 1, - "force": false, - "container_format": "bare", - "disk_format": "raw", - "do_delete": true, - "image": { - "name": "^cirros.*-disk$" - } - }, - "runner": { - "type": "constant", - "times": 3, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - 
"users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - }, - { - "args": { - "size": { - "min": 1, - "max": 5 - }, - "force": false, - "container_format": "bare", - "disk_format": "raw", - "do_delete": true, - "image": { - "name": "^cirros.*-disk$" - } - }, - "runner": { - "type": "constant", - "times": 3, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-and-upload-volume-to-image.yaml b/samples/tasks/scenarios/cinder/create-and-upload-volume-to-image.yaml deleted file mode 100644 index deb70001a8..0000000000 --- a/samples/tasks/scenarios/cinder/create-and-upload-volume-to-image.yaml +++ /dev/null @@ -1,44 +0,0 @@ ---- - CinderVolumes.create_and_upload_volume_to_image: - - - args: - size: 1 - force: false - container_format: "bare" - disk_format: "raw" - do_delete: true - image: - name: "^cirros.*-disk$" - runner: - type: "constant" - times: 3 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - - args: - size: - min: 1 - max: 5 - force: false - container_format: "bare" - disk_format: "raw" - do_delete: true - image: - name: "^cirros.*-disk$" - runner: - type: "constant" - times: 3 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/create-encryption-type.json b/samples/tasks/scenarios/cinder/create-encryption-type.json deleted file mode 100644 index a32f7c7f24..0000000000 --- a/samples/tasks/scenarios/cinder/create-encryption-type.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "CinderVolumeTypes.create_volume_type_and_encryption_type": [ - { - "args": { - "description": "rally tests creating types", - "provider": "LuksEncryptor", - "cipher": "aes-xts-plain64", - "key_size": 512, - "control_location": "front-end" - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-encryption-type.yaml b/samples/tasks/scenarios/cinder/create-encryption-type.yaml deleted file mode 100644 index 5898817f7d..0000000000 --- a/samples/tasks/scenarios/cinder/create-encryption-type.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - CinderVolumeTypes.create_volume_type_and_encryption_type: - - - args: - description: "rally tests creating types" - provider: "LuksEncryptor" - cipher: "aes-xts-plain64" - key_size: 512 - control_location: "front-end" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/create-from-image-and-delete-volume.json b/samples/tasks/scenarios/cinder/create-from-image-and-delete-volume.json deleted file mode 100644 index 876c55dde6..0000000000 --- a/samples/tasks/scenarios/cinder/create-from-image-and-delete-volume.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "CinderVolumes.create_and_delete_volume": [ - { - "args": { - "size": 1, - "image": { - "name": "^cirros.*-disk$" - } - }, - "runner": { - "type": "constant", - "times": 2, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git 
a/samples/tasks/scenarios/cinder/create-from-image-and-delete-volume.yaml b/samples/tasks/scenarios/cinder/create-from-image-and-delete-volume.yaml deleted file mode 100644 index 8a68e71214..0000000000 --- a/samples/tasks/scenarios/cinder/create-from-image-and-delete-volume.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- - CinderVolumes.create_and_delete_volume: - - - args: - size: 1 - image: - name: "^cirros.*-disk$" - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/create-from-volume-and-delete-volume.json b/samples/tasks/scenarios/cinder/create-from-volume-and-delete-volume.json deleted file mode 100644 index d990052dcf..0000000000 --- a/samples/tasks/scenarios/cinder/create-from-volume-and-delete-volume.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "CinderVolumes.create_from_volume_and_delete_volume": [ - { - "args": { - "size": 1 - }, - "runner": { - "type": "constant", - "times": 2, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "volumes": { - "size": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - }, - { - "args": { - "size": { - "min": 1, - "max": 5 - } - }, - "runner": { - "type": "constant", - "times": 2, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "volumes": { - "size": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-from-volume-and-delete-volume.yaml b/samples/tasks/scenarios/cinder/create-from-volume-and-delete-volume.yaml deleted file mode 100644 index 04ef25689c..0000000000 --- a/samples/tasks/scenarios/cinder/create-from-volume-and-delete-volume.yaml +++ /dev/null @@ -1,36 +0,0 @@ ---- - CinderVolumes.create_from_volume_and_delete_volume: - - - args: - size: 1 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - volumes: - size: 1 - sla: - failure_rate: - max: 0 - - - args: - size: - min: 1 - max: 5 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - volumes: - size: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/create-get-and-delete-encryption-type.json b/samples/tasks/scenarios/cinder/create-get-and-delete-encryption-type.json deleted file mode 100644 index e3e283a3a0..0000000000 --- a/samples/tasks/scenarios/cinder/create-get-and-delete-encryption-type.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "CinderVolumeTypes.create_get_and_delete_encryption_type": [ - { - "args": { - "provider": "LuksEncryptor", - "cipher": "aes-xts-plain64", - "key_size": 512, - "control_location": "front-end" - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "volume_types": [ - "test_type1", - "test_type2", - "test_type3", - "test_type4" - ] - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-get-and-delete-encryption-type.yaml b/samples/tasks/scenarios/cinder/create-get-and-delete-encryption-type.yaml deleted file mode 100644 index 2e53a2b5d2..0000000000 --- a/samples/tasks/scenarios/cinder/create-get-and-delete-encryption-type.yaml +++ /dev/null @@ -1,24 +0,0 @@ - CinderVolumeTypes.create_get_and_delete_encryption_type: - - - args: - provider: "LuksEncryptor" - 
cipher: "aes-xts-plain64" - key_size: 512 - control_location: "front-end" - runner: - type: "constant" - times: 4 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 - volume_types: [ - "test_type1", - "test_type2", - "test_type3", - "test_type4" - ] - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/create-incremental-volume-backup.json b/samples/tasks/scenarios/cinder/create-incremental-volume-backup.json deleted file mode 100644 index 81f1074784..0000000000 --- a/samples/tasks/scenarios/cinder/create-incremental-volume-backup.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "CinderVolumeBackups.create_incremental_volume_backup": [ - { - "args": { - "size": 1, - "create_volume_kwargs": {}, - "create_backup_kwargs": {} - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "roles": ["admin"] - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-incremental-volume-backup.yaml b/samples/tasks/scenarios/cinder/create-incremental-volume-backup.yaml deleted file mode 100644 index 73b9a49063..0000000000 --- a/samples/tasks/scenarios/cinder/create-incremental-volume-backup.yaml +++ /dev/null @@ -1,21 +0,0 @@ ---- - CinderVolumeBackups.create_incremental_volume_backup: - - - args: - size: 1 - create_volume_kwargs: {} - create_backup_kwargs: {} - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - roles: - - "admin" - sla: - failure_rate: - max: 0 - diff --git a/samples/tasks/scenarios/cinder/create-nested-snapshots-and-attach-volume.json b/samples/tasks/scenarios/cinder/create-nested-snapshots-and-attach-volume.json deleted file mode 100644 index bc5093d8ae..0000000000 --- a/samples/tasks/scenarios/cinder/create-nested-snapshots-and-attach-volume.json +++ /dev/null @@ -1,36 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "CinderVolumes.create_nested_snapshots_and_attach_volume": [ - { - "args": { - "size": { - "min": 1, - "max": 5 - }, - "nested_level": 5, - "image": { - "name": "^cirros.*-disk$" - }, - "flavor": { - "name": "{{flavor_name}}" - } - }, - "runner": { - "type": "constant", - "times": 1, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-nested-snapshots-and-attach-volume.yaml b/samples/tasks/scenarios/cinder/create-nested-snapshots-and-attach-volume.yaml deleted file mode 100644 index 2c5de260fc..0000000000 --- a/samples/tasks/scenarios/cinder/create-nested-snapshots-and-attach-volume.yaml +++ /dev/null @@ -1,24 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - CinderVolumes.create_nested_snapshots_and_attach_volume: - - - args: - size: - min: 1 - max: 5 - nested_level: 5 - image: - name: "^cirros.*-disk$" - flavor: - name: "{{flavor_name}}" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/create-snapshot-and-attach-volume.json b/samples/tasks/scenarios/cinder/create-snapshot-and-attach-volume.json deleted file mode 100644 index ae297d54b2..0000000000 --- a/samples/tasks/scenarios/cinder/create-snapshot-and-attach-volume.json +++ /dev/null @@ -1,68 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" 
%} -{ - "CinderVolumes.create_snapshot_and_attach_volume": [ - { - "args": { - "volume_type": "lvmdriver-1", - "size": { - "min": 1, - "max": 5 - }, - "image": { - "name": "^cirros.*-disk$" - }, - "flavor": { - "name": "{{flavor_name}}" - } - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - }, - { - "args": { - "volume_type": "test", - "size": { - "min": 1, - "max": 5 - }, - "image": { - "name": "^cirros.*-disk$" - }, - "flavor": { - "name": "{{flavor_name}}" - } - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 1 - }, - "volume_types": ["test"] - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-snapshot-and-attach-volume.yaml b/samples/tasks/scenarios/cinder/create-snapshot-and-attach-volume.yaml deleted file mode 100644 index b66c22de78..0000000000 --- a/samples/tasks/scenarios/cinder/create-snapshot-and-attach-volume.yaml +++ /dev/null @@ -1,47 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - CinderVolumes.create_snapshot_and_attach_volume: - - - args: - volume_type: "lvmdriver-1" - size: - min: 1 - max: 5 - image: - name: "^cirros.*-disk$" - flavor: - name: "{{flavor_name}}" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 - - - args: - volume_type: "test" - size: - min: 1 - max: 5 - image: - name: "^cirros.*-disk$" - flavor: - name: "{{flavor_name}}" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 - volume_types: - - "test" - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/create-volume-and-clone.json b/samples/tasks/scenarios/cinder/create-volume-and-clone.json deleted file mode 100755 index 0ca4816d0a..0000000000 --- a/samples/tasks/scenarios/cinder/create-volume-and-clone.json +++ /dev/null @@ -1,50 +0,0 @@ -{ - "CinderVolumes.create_volume_and_clone": [ - { - "args": { - "size": 1 - }, - "runner": { - "type": "constant", - "times": 3, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - }, - { - "args": { - "size": { - "min": 1, - "max": 5 - }, - "nested_level": 3 - }, - "runner": { - "type": "constant", - "times": 3, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-volume-and-clone.yaml b/samples/tasks/scenarios/cinder/create-volume-and-clone.yaml deleted file mode 100755 index 0cf4f48428..0000000000 --- a/samples/tasks/scenarios/cinder/create-volume-and-clone.yaml +++ /dev/null @@ -1,33 +0,0 @@ ---- - CinderVolumes.create_volume_and_clone: - - - args: - size: 1 - runner: - type: "constant" - times: 3 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - - args: - size: - min: 1 - max: 5 - nested_level: 3 - runner: - type: "constant" - times: 3 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/create-volume-backup.json 
b/samples/tasks/scenarios/cinder/create-volume-backup.json deleted file mode 100644 index 76b58a4274..0000000000 --- a/samples/tasks/scenarios/cinder/create-volume-backup.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "CinderVolumes.create_volume_backup": [ - { - "args": { - "size": 1, - "do_delete": true, - "create_volume_kwargs": {}, - "create_backup_kwargs": {} - }, - "runner": { - "type": "constant", - "times": 2, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "roles": ["Member"] - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-volume-backup.yaml b/samples/tasks/scenarios/cinder/create-volume-backup.yaml deleted file mode 100644 index d6b033c51a..0000000000 --- a/samples/tasks/scenarios/cinder/create-volume-backup.yaml +++ /dev/null @@ -1,21 +0,0 @@ ---- - CinderVolumes.create_volume_backup: - - - args: - size: 1 - do_delete: True - create_volume_kwargs: {} - create_backup_kwargs: {} - runner: - type: "constant" - times: 2 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - roles: - - "Member" - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/create-volume-from-snapshot.json b/samples/tasks/scenarios/cinder/create-volume-from-snapshot.json deleted file mode 100755 index 5b5e744b3b..0000000000 --- a/samples/tasks/scenarios/cinder/create-volume-from-snapshot.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "CinderVolumes.create_volume_from_snapshot": [ - { - "args": { - "do_delete": true - }, - "runner": { - "type": "constant", - "times": 3, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "volumes": { - "size": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-volume-from-snapshot.yaml b/samples/tasks/scenarios/cinder/create-volume-from-snapshot.yaml deleted file mode 100755 index e6b36e38df..0000000000 --- a/samples/tasks/scenarios/cinder/create-volume-from-snapshot.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- - CinderVolumes.create_volume_from_snapshot: - - - args: - do_delete: true - runner: - type: "constant" - times: 3 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - volumes: - size: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/create-volume-type-add-and-list-type-access.json b/samples/tasks/scenarios/cinder/create-volume-type-add-and-list-type-access.json deleted file mode 100644 index a11de209c8..0000000000 --- a/samples/tasks/scenarios/cinder/create-volume-type-add-and-list-type-access.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "CinderVolumeTypes.create_volume_type_add_and_list_type_access": [ - { - "args": { - "description": "rally tests creating types" - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-volume-type-add-and-list-type-access.yaml b/samples/tasks/scenarios/cinder/create-volume-type-add-and-list-type-access.yaml deleted file mode 100644 index 8afb735b7b..0000000000 --- a/samples/tasks/scenarios/cinder/create-volume-type-add-and-list-type-access.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - CinderVolumeTypes.create_volume_type_add_and_list_type_access: - - - args: - description: "rally tests creating types" - runner: - type: 
"constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/create-volume.json b/samples/tasks/scenarios/cinder/create-volume.json deleted file mode 100644 index 2bc214e133..0000000000 --- a/samples/tasks/scenarios/cinder/create-volume.json +++ /dev/null @@ -1,49 +0,0 @@ -{ - "CinderVolumes.create_volume": [ - { - "args": { - "size": 1 - }, - "runner": { - "type": "constant", - "times": 3, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - }, - { - "args": { - "size": { - "min": 1, - "max": 5 - } - }, - "runner": { - "type": "constant", - "times": 3, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/create-volume.yaml b/samples/tasks/scenarios/cinder/create-volume.yaml deleted file mode 100644 index 81703fb19a..0000000000 --- a/samples/tasks/scenarios/cinder/create-volume.yaml +++ /dev/null @@ -1,32 +0,0 @@ ---- - CinderVolumes.create_volume: - - - args: - size: 1 - runner: - type: "constant" - times: 3 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - - args: - size: - min: 1 - max: 5 - runner: - type: "constant" - times: 3 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/list-transfers.json b/samples/tasks/scenarios/cinder/list-transfers.json deleted file mode 100644 index 534fb2c552..0000000000 --- a/samples/tasks/scenarios/cinder/list-transfers.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "CinderVolumes.list_transfers": [ - { - "args": { - "detailed": true - }, - "runner": { - "type": "constant", - "times": 3, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/list-transfers.yaml b/samples/tasks/scenarios/cinder/list-transfers.yaml deleted file mode 100644 index 566906ec9a..0000000000 --- a/samples/tasks/scenarios/cinder/list-transfers.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - CinderVolumes.list_transfers: - - - args: - detailed: true - runner: - type: "constant" - times: 3 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/list-types.json b/samples/tasks/scenarios/cinder/list-types.json deleted file mode 100644 index f108862e8a..0000000000 --- a/samples/tasks/scenarios/cinder/list-types.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "CinderVolumes.list_types": [ - { - "args": { - "is_public": true - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 3 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/list-types.yaml b/samples/tasks/scenarios/cinder/list-types.yaml deleted file mode 100644 index 939c634649..0000000000 --- a/samples/tasks/scenarios/cinder/list-types.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - CinderVolumes.list_types: - - - args: - is_public: true - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 3 - 
sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/list-volumes.json b/samples/tasks/scenarios/cinder/list-volumes.json deleted file mode 100644 index 8267b8ced2..0000000000 --- a/samples/tasks/scenarios/cinder/list-volumes.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "CinderVolumes.list_volumes": [ - { - "args": { - "detailed": true - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "volumes": { - "size": 1, - "volumes_per_tenant": 4 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/list-volumes.yaml b/samples/tasks/scenarios/cinder/list-volumes.yaml deleted file mode 100644 index bd4d76a155..0000000000 --- a/samples/tasks/scenarios/cinder/list-volumes.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - CinderVolumes.list_volumes: - - - args: - detailed: True - runner: - type: "constant" - times: 100 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - volumes: - size: 1 - volumes_per_tenant: 4 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/cinder/modify-volume-metadata.json b/samples/tasks/scenarios/cinder/modify-volume-metadata.json deleted file mode 100644 index 9c9aafcac3..0000000000 --- a/samples/tasks/scenarios/cinder/modify-volume-metadata.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "CinderVolumes.modify_volume_metadata": [ - { - "args": {}, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "volumes": { - "size": 1 - }, - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/cinder/modify-volume-metadata.yaml b/samples/tasks/scenarios/cinder/modify-volume-metadata.yaml deleted file mode 100644 index 9443a31d80..0000000000 --- a/samples/tasks/scenarios/cinder/modify-volume-metadata.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - CinderVolumes.modify_volume_metadata: - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - volumes: - size: 1 - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/designate/create-and-delete-domain.json b/samples/tasks/scenarios/designate/create-and-delete-domain.json deleted file mode 100644 index ac3bb2cfda..0000000000 --- a/samples/tasks/scenarios/designate/create-and-delete-domain.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "DesignateBasic.create_and_delete_domain": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 10 - }, - "context": { - "quotas": { - "designate": { - "domains": 100, - "domain_recordsets": 500, - "domain_records": 2000, - "recordset_records": 2000 - } - }, - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/designate/create-and-delete-domain.yaml b/samples/tasks/scenarios/designate/create-and-delete-domain.yaml deleted file mode 100644 index d30e9ac62e..0000000000 --- a/samples/tasks/scenarios/designate/create-and-delete-domain.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - DesignateBasic.create_and_delete_domain: - - - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - quotas: - designate: - domains: 100 - domain_recordsets: 500 - domain_records: 2000 - recordset_records: 2000 - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 
0 diff --git a/samples/tasks/scenarios/designate/create-and-delete-records.json b/samples/tasks/scenarios/designate/create-and-delete-records.json deleted file mode 100644 index 91839a3939..0000000000 --- a/samples/tasks/scenarios/designate/create-and-delete-records.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "DesignateBasic.create_and_delete_records": [ - { - "args": { - "records_per_domain": 10 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 10 - }, - "context": { - "quotas": { - "designate": { - "domains": 100, - "domain_recordsets": 2000, - "domain_records": 2000, - "recordset_records": 2000 - } - }, - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/designate/create-and-delete-records.yaml b/samples/tasks/scenarios/designate/create-and-delete-records.yaml deleted file mode 100644 index c6b098773d..0000000000 --- a/samples/tasks/scenarios/designate/create-and-delete-records.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- - DesignateBasic.create_and_delete_records: - - - args: - records_per_domain: 10 - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - quotas: - designate: - domains: 100 - domain_recordsets: 2000 - domain_records: 2000 - recordset_records: 2000 - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/designate/create-and-delete-recordsets.json b/samples/tasks/scenarios/designate/create-and-delete-recordsets.json deleted file mode 100644 index 7adc241a1a..0000000000 --- a/samples/tasks/scenarios/designate/create-and-delete-recordsets.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "DesignateBasic.create_and_delete_recordsets": [ - { - "args": { - "recordsets_per_zone": 10 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 10 - }, - "context": { - "quotas": { - "designate": { - "domains": 100, - "domain_recordsets": 2000, - "domain_records": 2000, - "recordset_records": 2000 - } - }, - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "zones": { - "zones_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/designate/create-and-delete-recordsets.yaml b/samples/tasks/scenarios/designate/create-and-delete-recordsets.yaml deleted file mode 100644 index 05115b2cdd..0000000000 --- a/samples/tasks/scenarios/designate/create-and-delete-recordsets.yaml +++ /dev/null @@ -1,24 +0,0 @@ ---- - DesignateBasic.create_and_delete_recordsets: - - - args: - recordsets_per_zone: 10 - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - quotas: - designate: - domains: 100 - domain_recordsets: 2000 - domain_records: 2000 - recordset_records: 2000 - users: - tenants: 2 - users_per_tenant: 2 - zones: - zones_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/designate/create-and-delete-server.json b/samples/tasks/scenarios/designate/create-and-delete-server.json deleted file mode 100644 index 08fad7fead..0000000000 --- a/samples/tasks/scenarios/designate/create-and-delete-server.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "DesignateBasic.create_and_delete_server": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/designate/create-and-delete-server.yaml 
b/samples/tasks/scenarios/designate/create-and-delete-server.yaml deleted file mode 100644 index 9214d5cb96..0000000000 --- a/samples/tasks/scenarios/designate/create-and-delete-server.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - DesignateBasic.create_and_delete_server: - - - runner: - type: "constant" - times: 10 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/designate/create-and-delete-zone.json b/samples/tasks/scenarios/designate/create-and-delete-zone.json deleted file mode 100644 index f6b943751a..0000000000 --- a/samples/tasks/scenarios/designate/create-and-delete-zone.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "DesignateBasic.create_and_delete_zone": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 10 - }, - "context": { - "quotas": { - "designate": { - "domains": 100, - "domain_recordsets": 500, - "domain_records": 2000, - "recordset_records": 2000 - } - }, - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/designate/create-and-delete-zone.yaml b/samples/tasks/scenarios/designate/create-and-delete-zone.yaml deleted file mode 100644 index a2c7b240b9..0000000000 --- a/samples/tasks/scenarios/designate/create-and-delete-zone.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - DesignateBasic.create_and_delete_zone: - - - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - quotas: - designate: - domains: 100 - domain_recordsets: 500 - domain_records: 2000 - recordset_records: 2000 - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/designate/create-and-list-domain.json b/samples/tasks/scenarios/designate/create-and-list-domain.json deleted file mode 100644 index 792538b6bf..0000000000 --- a/samples/tasks/scenarios/designate/create-and-list-domain.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "DesignateBasic.create_and_list_domains": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 10 - }, - "context": { - "quotas": { - "designate": { - "domains": 100, - "domain_recordsets": 500, - "domain_records": 2000, - "recordset_records": 2000 - } - }, - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/designate/create-and-list-domain.yaml b/samples/tasks/scenarios/designate/create-and-list-domain.yaml deleted file mode 100644 index 89caec5446..0000000000 --- a/samples/tasks/scenarios/designate/create-and-list-domain.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - DesignateBasic.create_and_list_domains: - - - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - quotas: - designate: - domains: 100 - domain_recordsets: 500 - domain_records: 2000 - recordset_records: 2000 - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/designate/create-and-list-records.json b/samples/tasks/scenarios/designate/create-and-list-records.json deleted file mode 100644 index a6d9810507..0000000000 --- a/samples/tasks/scenarios/designate/create-and-list-records.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "DesignateBasic.create_and_list_records": [ - { - "args": { - "records_per_domain": 10 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 10 - }, - "context": { - "quotas": { - "designate": { - "domains": 100, - 
"domain_recordsets": 2000, - "domain_records": 2000, - "recordset_records": 2000 - } - }, - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/designate/create-and-list-records.yaml b/samples/tasks/scenarios/designate/create-and-list-records.yaml deleted file mode 100644 index d24da341cb..0000000000 --- a/samples/tasks/scenarios/designate/create-and-list-records.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- - DesignateBasic.create_and_list_records: - - - args: - records_per_domain: 10 - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - quotas: - designate: - domains: 100 - domain_recordsets: 2000 - domain_records: 2000 - recordset_records: 2000 - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/designate/create-and-list-recordsets.json b/samples/tasks/scenarios/designate/create-and-list-recordsets.json deleted file mode 100644 index 7bafa102f2..0000000000 --- a/samples/tasks/scenarios/designate/create-and-list-recordsets.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "DesignateBasic.create_and_list_recordsets": [ - { - "args": { - "recordsets_per_zone": 10 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 10 - }, - "context": { - "quotas": { - "designate": { - "domains": 100, - "domain_recordsets": 2000, - "domain_records": 2000, - "recordset_records": 2000 - } - }, - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "zones": { - "zones_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/designate/create-and-list-recordsets.yaml b/samples/tasks/scenarios/designate/create-and-list-recordsets.yaml deleted file mode 100644 index 8396f97c48..0000000000 --- a/samples/tasks/scenarios/designate/create-and-list-recordsets.yaml +++ /dev/null @@ -1,24 +0,0 @@ ---- - DesignateBasic.create_and_list_recordsets: - - - args: - recordsets_per_zone: 10 - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - quotas: - designate: - domains: 100 - domain_recordsets: 2000 - domain_records: 2000 - recordset_records: 2000 - users: - tenants: 2 - users_per_tenant: 2 - zones: - zones_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/designate/create-and-list-servers.json b/samples/tasks/scenarios/designate/create-and-list-servers.json deleted file mode 100644 index 6b1bbfb66c..0000000000 --- a/samples/tasks/scenarios/designate/create-and-list-servers.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "DesignateBasic.create_and_list_servers": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/designate/create-and-list-servers.yaml b/samples/tasks/scenarios/designate/create-and-list-servers.yaml deleted file mode 100644 index 5ac183e751..0000000000 --- a/samples/tasks/scenarios/designate/create-and-list-servers.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - DesignateBasic.create_and_list_servers: - - - runner: - type: "constant" - times: 10 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/designate/create-and-list-zones.json b/samples/tasks/scenarios/designate/create-and-list-zones.json deleted file mode 100644 index 
42b06ec9f8..0000000000 --- a/samples/tasks/scenarios/designate/create-and-list-zones.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "DesignateBasic.create_and_list_zones": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 10 - }, - "context": { - "quotas": { - "designate": { - "domains": 100, - "domain_recordsets": 500, - "domain_records": 2000, - "recordset_records": 2000 - } - }, - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/designate/create-and-list-zones.yaml b/samples/tasks/scenarios/designate/create-and-list-zones.yaml deleted file mode 100644 index 20a34be13a..0000000000 --- a/samples/tasks/scenarios/designate/create-and-list-zones.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - DesignateBasic.create_and_list_zones: - - - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - quotas: - designate: - domains: 100 - domain_recordsets: 500 - domain_records: 2000 - recordset_records: 2000 - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/designate/create-and-update-domain.json b/samples/tasks/scenarios/designate/create-and-update-domain.json deleted file mode 100644 index 336c3bfdc7..0000000000 --- a/samples/tasks/scenarios/designate/create-and-update-domain.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "DesignateBasic.create_and_update_domain": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 10 - }, - "context": { - "quotas": { - "designate": { - "domains": 100, - "domain_recordsets": 500, - "domain_records": 2000, - "recordset_records": 2000 - } - }, - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/designate/create-and-update-domain.yaml b/samples/tasks/scenarios/designate/create-and-update-domain.yaml deleted file mode 100644 index 8a94ff0006..0000000000 --- a/samples/tasks/scenarios/designate/create-and-update-domain.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - DesignateBasic.create_and_update_domain: - - - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - quotas: - designate: - domains: 100 - domain_recordsets: 500 - domain_records: 2000 - recordset_records: 2000 - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/designate/list-domains.json b/samples/tasks/scenarios/designate/list-domains.json deleted file mode 100644 index f578f86401..0000000000 --- a/samples/tasks/scenarios/designate/list-domains.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "DesignateBasic.list_domains": [ - { - "runner": { - "type": "constant", - "times": 3, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/designate/list-domains.yaml b/samples/tasks/scenarios/designate/list-domains.yaml deleted file mode 100644 index 0047ca0af9..0000000000 --- a/samples/tasks/scenarios/designate/list-domains.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - DesignateBasic.list_domains: - - - runner: - type: "constant" - times: 3 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/designate/list-records.json b/samples/tasks/scenarios/designate/list-records.json deleted file mode 100644 index 
d4f0206bda..0000000000 --- a/samples/tasks/scenarios/designate/list-records.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "DesignateBasic.list_records": [ - { - "args": { - "domain_id": "" - }, - "runner": { - "type": "constant", - "times": 3, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/designate/list-records.yaml b/samples/tasks/scenarios/designate/list-records.yaml deleted file mode 100644 index c256a21c20..0000000000 --- a/samples/tasks/scenarios/designate/list-records.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - DesignateBasic.list_records: - - - args: - domain_id: - runner: - type: "constant" - times: 3 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/designate/list-recordsets.json b/samples/tasks/scenarios/designate/list-recordsets.json deleted file mode 100644 index 911c29f8c8..0000000000 --- a/samples/tasks/scenarios/designate/list-recordsets.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "DesignateBasic.list_recordsets": [ - { - "args": { - "zone_id": "" - }, - "runner": { - "type": "constant", - "times": 3, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/designate/list-recordsets.yaml b/samples/tasks/scenarios/designate/list-recordsets.yaml deleted file mode 100644 index 441790ca95..0000000000 --- a/samples/tasks/scenarios/designate/list-recordsets.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - DesignateBasic.list_recordsets: - - - args: - zone_id: - runner: - type: "constant" - times: 3 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/designate/list-servers.json b/samples/tasks/scenarios/designate/list-servers.json deleted file mode 100644 index 008cc2a570..0000000000 --- a/samples/tasks/scenarios/designate/list-servers.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "DesignateBasic.list_servers": [ - { - "runner": { - "type": "constant", - "times": 3, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/designate/list-servers.yaml b/samples/tasks/scenarios/designate/list-servers.yaml deleted file mode 100644 index 0addbdadbb..0000000000 --- a/samples/tasks/scenarios/designate/list-servers.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - DesignateBasic.list_servers: - - - runner: - type: "constant" - times: 3 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/designate/list-zones.json b/samples/tasks/scenarios/designate/list-zones.json deleted file mode 100644 index c7319c50ff..0000000000 --- a/samples/tasks/scenarios/designate/list-zones.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "DesignateBasic.list_zones": [ - { - "runner": { - "type": "constant", - "times": 3, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/designate/list-zones.yaml b/samples/tasks/scenarios/designate/list-zones.yaml deleted file mode 100644 index 722c5db09c..0000000000 --- 
a/samples/tasks/scenarios/designate/list-zones.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - DesignateBasic.list_zones: - - - runner: - type: "constant" - times: 3 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/dummy/dummy-openstack.json b/samples/tasks/scenarios/dummy/dummy-openstack.json deleted file mode 100644 index 078e694f1d..0000000000 --- a/samples/tasks/scenarios/dummy/dummy-openstack.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "Dummy.openstack": [ - { - "runner": { - "type": "constant", - "times": 1, - "concurrency": 1 - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/dummy/dummy-openstack.yaml b/samples/tasks/scenarios/dummy/dummy-openstack.yaml deleted file mode 100644 index 6fb118210d..0000000000 --- a/samples/tasks/scenarios/dummy/dummy-openstack.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- - Dummy.openstack: - - - runner: - type: "constant" - times: 1 - concurrency: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/ec2/boot.json b/samples/tasks/scenarios/ec2/boot.json deleted file mode 100644 index 00fe612896..0000000000 --- a/samples/tasks/scenarios/ec2/boot.json +++ /dev/null @@ -1,31 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "EC2Servers.boot_server": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - } - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/ec2/boot.yaml b/samples/tasks/scenarios/ec2/boot.yaml deleted file mode 100644 index be1f415fab..0000000000 --- a/samples/tasks/scenarios/ec2/boot.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - EC2Servers.boot_server: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/ec2/list-servers.json b/samples/tasks/scenarios/ec2/list-servers.json deleted file mode 100644 index f74e13f599..0000000000 --- a/samples/tasks/scenarios/ec2/list-servers.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "EC2Servers.list_servers": [ - { - "runner": { - "type": "constant", - "times": 1, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "ec2_servers": { - "flavor": { - "name": "m1.tiny" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "servers_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/ec2/list-servers.yaml b/samples/tasks/scenarios/ec2/list-servers.yaml deleted file mode 100644 index 7c788eab43..0000000000 --- a/samples/tasks/scenarios/ec2/list-servers.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - EC2Servers.list_servers: - - - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - ec2_servers: - flavor: - name: "m1.tiny" - image: - name: "^cirros.*-disk$" - servers_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/glance/create-and-deactivate-image.json b/samples/tasks/scenarios/glance/create-and-deactivate-image.json deleted file mode 100644 index 
8a03758921..0000000000 --- a/samples/tasks/scenarios/glance/create-and-deactivate-image.json +++ /dev/null @@ -1,28 +0,0 @@ -{% set image_location = image_location or "http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img" %} -{ - "GlanceImages.create_and_deactivate_image": [ - { - "args": { - "image_location": "{{ image_location }}", - "container_format": "bare", - "disk_format": "qcow2" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/glance/create-and-deactivate-image.yaml b/samples/tasks/scenarios/glance/create-and-deactivate-image.yaml deleted file mode 100644 index ac4eeb1f0d..0000000000 --- a/samples/tasks/scenarios/glance/create-and-deactivate-image.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{% set image_location = image_location or "http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img" %} ---- - GlanceImages.create_and_deactivate_image: - - - args: - image_location: "{{ image_location }}" - container_format: "bare" - disk_format: "qcow2" - runner: - type: "constant" - times: 10 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/glance/create-and-delete-image.json b/samples/tasks/scenarios/glance/create-and-delete-image.json deleted file mode 100644 index 50cde6f6cd..0000000000 --- a/samples/tasks/scenarios/glance/create-and-delete-image.json +++ /dev/null @@ -1,28 +0,0 @@ -{% set image_location = image_location or "http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img" %} -{ - "GlanceImages.create_and_delete_image": [ - { - "args": { - "image_location": "{{ image_location }}", - "container_format": "bare", - "disk_format": "qcow2" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 3 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/glance/create-and-delete-image.yaml b/samples/tasks/scenarios/glance/create-and-delete-image.yaml deleted file mode 100644 index 11893109a9..0000000000 --- a/samples/tasks/scenarios/glance/create-and-delete-image.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{% set image_location = image_location or "http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img" %} ---- - GlanceImages.create_and_delete_image: - - - args: - image_location: "{{ image_location }}" - container_format: "bare" - disk_format: "qcow2" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 3 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/glance/create-and-download-image.json b/samples/tasks/scenarios/glance/create-and-download-image.json deleted file mode 100644 index 19d8542e24..0000000000 --- a/samples/tasks/scenarios/glance/create-and-download-image.json +++ /dev/null @@ -1,29 +0,0 @@ -{% set image_location = image_location or "http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img" %} -{ - "GlanceImages.create_and_download_image": [ - { - "args": { - "image_location": "{{ image_location }}", - "container_format": "bare", - "disk_format": "qcow2" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 3 - } - }, - "sla": { - 
"failure_rate": { - "max": 0 - } - } - - } - ] -} diff --git a/samples/tasks/scenarios/glance/create-and-download-image.yaml b/samples/tasks/scenarios/glance/create-and-download-image.yaml deleted file mode 100644 index 25966c2986..0000000000 --- a/samples/tasks/scenarios/glance/create-and-download-image.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{% set image_location = image_location or "http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img" %} ---- - GlanceImages.create_and_download_image: - - - args: - image_location: "{{ image_location }}" - container_format: "bare" - disk_format: "qcow2" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 3 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/glance/create-and-get-image.json b/samples/tasks/scenarios/glance/create-and-get-image.json deleted file mode 100644 index 7df38df606..0000000000 --- a/samples/tasks/scenarios/glance/create-and-get-image.json +++ /dev/null @@ -1,28 +0,0 @@ -{% set image_location = image_location or "http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img" %} -{ - "GlanceImages.create_and_get_image": [ - { - "args": { - "image_location": "{{ image_location }}", - "container_format": "bare", - "disk_format": "qcow2" - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/glance/create-and-get-image.yaml b/samples/tasks/scenarios/glance/create-and-get-image.yaml deleted file mode 100644 index be9815bb54..0000000000 --- a/samples/tasks/scenarios/glance/create-and-get-image.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{% set image_location = image_location or "http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img" %} ---- - GlanceImages.create_and_get_image: - - - args: - image_location: "{{ image_location }}" - container_format: "bare" - disk_format: "qcow2" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/glance/create-and-list-image.json b/samples/tasks/scenarios/glance/create-and-list-image.json deleted file mode 100644 index 39f78fbdd6..0000000000 --- a/samples/tasks/scenarios/glance/create-and-list-image.json +++ /dev/null @@ -1,28 +0,0 @@ -{% set image_location = image_location or "http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img" %} -{ - "GlanceImages.create_and_list_image": [ - { - "args": { - "image_location": "{{ image_location }}", - "container_format": "bare", - "disk_format": "qcow2" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/glance/create-and-list-image.yaml b/samples/tasks/scenarios/glance/create-and-list-image.yaml deleted file mode 100644 index dda8c1fa86..0000000000 --- a/samples/tasks/scenarios/glance/create-and-list-image.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{% set image_location = image_location or "http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img" %} ---- - GlanceImages.create_and_list_image: - - - args: - image_location: "{{ image_location }}" - container_format: "bare" - disk_format: "qcow2" - runner: - type: "constant" - 
times: 10 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/glance/create-and-update-image.json b/samples/tasks/scenarios/glance/create-and-update-image.json deleted file mode 100644 index 478afb4c79..0000000000 --- a/samples/tasks/scenarios/glance/create-and-update-image.json +++ /dev/null @@ -1,33 +0,0 @@ -{% set image_location = image_location or "http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img" %} -{ - "GlanceImages.create_and_update_image": [ - { - "args": { - "image_location": "{{ image_location }}", - "container_format": "bare", - "disk_format": "qcow2" - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "api_versions": { - "glance": { - "version": 2 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/glance/create-and-update-image.yaml b/samples/tasks/scenarios/glance/create-and-update-image.yaml deleted file mode 100644 index 36071957fd..0000000000 --- a/samples/tasks/scenarios/glance/create-and-update-image.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{% set image_location = image_location or "http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img" %} ---- - GlanceImages.create_and_update_image: - - - args: - image_location: "{{ image_location }}" - container_format: "bare" - disk_format: "qcow2" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - api_versions: - glance: - version: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/glance/create-image-and-boot-instances.json b/samples/tasks/scenarios/glance/create-image-and-boot-instances.json deleted file mode 100644 index a944b37c25..0000000000 --- a/samples/tasks/scenarios/glance/create-image-and-boot-instances.json +++ /dev/null @@ -1,33 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set image_location = image_location or "http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img" %} -{ - "GlanceImages.create_image_and_boot_instances": [ - { - "args": { - "image_location": "{{ image_location }}", - "container_format": "bare", - "disk_format": "qcow2", - "flavor": { - "name": "{{ flavor_name }}" - }, - "number_instances": 2 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 5 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/glance/create-image-and-boot-instances.yaml b/samples/tasks/scenarios/glance/create-image-and-boot-instances.yaml deleted file mode 100644 index b3edc8ce9a..0000000000 --- a/samples/tasks/scenarios/glance/create-image-and-boot-instances.yaml +++ /dev/null @@ -1,23 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set image_location = image_location or "http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img" %} ---- - GlanceImages.create_image_and_boot_instances: - - - args: - image_location: "{{ image_location }}" - container_format: "bare" - disk_format: "qcow2" - flavor: - name: "{{ flavor_name }}" - number_instances: 2 - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 5 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/glance/list-images.json 
b/samples/tasks/scenarios/glance/list-images.json deleted file mode 100644 index cb2a46d815..0000000000 --- a/samples/tasks/scenarios/glance/list-images.json +++ /dev/null @@ -1,29 +0,0 @@ -{% set image_location = image_location or "http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img" %} -{ - "GlanceImages.list_images": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "images": { - "image_url": "{{ image_location }}", - "disk_format": "qcow2", - "container_format": "bare", - "images_per_tenant": 4 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/glance/list-images.yaml b/samples/tasks/scenarios/glance/list-images.yaml deleted file mode 100644 index 0338ae2379..0000000000 --- a/samples/tasks/scenarios/glance/list-images.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{% set image_location = image_location or "http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img" %} ---- - GlanceImages.list_images: - - - runner: - type: "constant" - times: 10 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 - images: - image_url: "{{ image_location }}" - disk_format: "qcow2" - container_format: "bare" - images_per_tenant: 4 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/gnocchi/create-archive-policy-rule.json b/samples/tasks/scenarios/gnocchi/create-archive-policy-rule.json deleted file mode 100644 index d2236eda78..0000000000 --- a/samples/tasks/scenarios/gnocchi/create-archive-policy-rule.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "GnocchiArchivePolicyRule.create_archive_policy_rule": [ - { - "args": { - "metric_pattern": "cpu_*", - "archive_policy_name": "low" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/gnocchi/create-archive-policy-rule.yaml b/samples/tasks/scenarios/gnocchi/create-archive-policy-rule.yaml deleted file mode 100644 index 8739895243..0000000000 --- a/samples/tasks/scenarios/gnocchi/create-archive-policy-rule.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- - GnocchiArchivePolicyRule.create_archive_policy_rule: - - - args: - metric_pattern: "cpu_*" - archive_policy_name: "low" - runner: - type: constant - times: 10 - concurrency: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/gnocchi/create-delete-archive-policy-rule.json b/samples/tasks/scenarios/gnocchi/create-delete-archive-policy-rule.json deleted file mode 100644 index ba5473f16d..0000000000 --- a/samples/tasks/scenarios/gnocchi/create-delete-archive-policy-rule.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "GnocchiArchivePolicyRule.create_delete_archive_policy_rule": [ - { - "args": { - "metric_pattern": "cpu_*", - "archive_policy_name": "low" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/gnocchi/create-delete-archive-policy-rule.yaml b/samples/tasks/scenarios/gnocchi/create-delete-archive-policy-rule.yaml deleted file mode 100644 index 8cd364e440..0000000000 --- a/samples/tasks/scenarios/gnocchi/create-delete-archive-policy-rule.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- - GnocchiArchivePolicyRule.create_delete_archive_policy_rule: - - - args: - metric_pattern: "cpu_*" - archive_policy_name: "low" - runner: - type: constant - times: 10 - 
concurrency: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/gnocchi/get-status.json b/samples/tasks/scenarios/gnocchi/get-status.json deleted file mode 100644 index a4a1e4faa2..0000000000 --- a/samples/tasks/scenarios/gnocchi/get-status.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "Gnocchi.get_status": [ - { - "args": { - "detailed": false - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/gnocchi/get-status.yaml b/samples/tasks/scenarios/gnocchi/get-status.yaml deleted file mode 100644 index 0dfdfcddbd..0000000000 --- a/samples/tasks/scenarios/gnocchi/get-status.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- - Gnocchi.get_status: - - - args: - detailed: false - runner: - type: constant - times: 10 - concurrency: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/gnocchi/list-archive-policy-rule.json b/samples/tasks/scenarios/gnocchi/list-archive-policy-rule.json deleted file mode 100644 index 6523c7c252..0000000000 --- a/samples/tasks/scenarios/gnocchi/list-archive-policy-rule.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "GnocchiArchivePolicyRule.list_archive_policy_rule": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 3 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/gnocchi/list-archive-policy-rule.yaml b/samples/tasks/scenarios/gnocchi/list-archive-policy-rule.yaml deleted file mode 100644 index ae33459f53..0000000000 --- a/samples/tasks/scenarios/gnocchi/list-archive-policy-rule.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - GnocchiArchivePolicyRule.list_archive_policy_rule: - - - runner: - type: constant - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 3 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/gnocchi/list-capabilities.json b/samples/tasks/scenarios/gnocchi/list-capabilities.json deleted file mode 100644 index 060b660fd7..0000000000 --- a/samples/tasks/scenarios/gnocchi/list-capabilities.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "Gnocchi.list_capabilities": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 3 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/gnocchi/list-capabilities.yaml b/samples/tasks/scenarios/gnocchi/list-capabilities.yaml deleted file mode 100644 index 0890489b60..0000000000 --- a/samples/tasks/scenarios/gnocchi/list-capabilities.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - Gnocchi.list_capabilities: - - - runner: - type: constant - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 3 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/heat/create-and-delete-stack-resource-group.json b/samples/tasks/scenarios/heat/create-and-delete-stack-resource-group.json deleted file mode 100644 index 8b764fc9f3..0000000000 --- a/samples/tasks/scenarios/heat/create-and-delete-stack-resource-group.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "HeatStacks.create_and_delete_stack": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/resource-group-server-with-volume.yaml.template", - "parameters": { - "num_instances": 2 - }, - "files": 
["samples/tasks/scenarios/heat/templates/server-with-volume.yaml.template"] - }, - "runner": { - "type": "constant", - "times": 3, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 3 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/create-and-delete-stack-resource-group.yaml b/samples/tasks/scenarios/heat/create-and-delete-stack-resource-group.yaml deleted file mode 100644 index 40fcbe5ef7..0000000000 --- a/samples/tasks/scenarios/heat/create-and-delete-stack-resource-group.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - HeatStacks.create_and_delete_stack: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/resource-group-server-with-volume.yaml.template" - parameters: - num_instances: 2 - files: ["samples/tasks/scenarios/heat/templates/server-with-volume.yaml.template"] - runner: - type: "constant" - times: 3 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 3 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/heat/create-and-delete-stack-with-delay.json b/samples/tasks/scenarios/heat/create-and-delete-stack-with-delay.json deleted file mode 100644 index 5a4391bff3..0000000000 --- a/samples/tasks/scenarios/heat/create-and-delete-stack-with-delay.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "HeatStacks.create_and_delete_stack": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/resource-group-with-constraint.yaml.template" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/create-and-delete-stack-with-delay.yaml b/samples/tasks/scenarios/heat/create-and-delete-stack-with-delay.yaml deleted file mode 100644 index e3a6e5c772..0000000000 --- a/samples/tasks/scenarios/heat/create-and-delete-stack-with-delay.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - HeatStacks.create_and_delete_stack: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/resource-group-with-constraint.yaml.template" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/heat/create-and-delete-stack-with-neutron.json b/samples/tasks/scenarios/heat/create-and-delete-stack-with-neutron.json deleted file mode 100644 index bbe7539d2b..0000000000 --- a/samples/tasks/scenarios/heat/create-and-delete-stack-with-neutron.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "HeatStacks.create_and_delete_stack": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/server-with-ports.yaml.template" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 3 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/create-and-delete-stack-with-neutron.yaml b/samples/tasks/scenarios/heat/create-and-delete-stack-with-neutron.yaml deleted file mode 100644 index eadc885267..0000000000 --- a/samples/tasks/scenarios/heat/create-and-delete-stack-with-neutron.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - HeatStacks.create_and_delete_stack: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/server-with-ports.yaml.template" - runner: - type: "constant" - times: 10 - 
concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 3 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/heat/create-and-delete-stack-with-volume.json b/samples/tasks/scenarios/heat/create-and-delete-stack-with-volume.json deleted file mode 100644 index 00cb1ff3f9..0000000000 --- a/samples/tasks/scenarios/heat/create-and-delete-stack-with-volume.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "HeatStacks.create_and_delete_stack": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/server-with-volume.yaml.template" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 3 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/create-and-delete-stack-with-volume.yaml b/samples/tasks/scenarios/heat/create-and-delete-stack-with-volume.yaml deleted file mode 100644 index 74cfee661a..0000000000 --- a/samples/tasks/scenarios/heat/create-and-delete-stack-with-volume.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - HeatStacks.create_and_delete_stack: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/server-with-volume.yaml.template" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 3 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/heat/create-and-delete-stack.json b/samples/tasks/scenarios/heat/create-and-delete-stack.json deleted file mode 100644 index cd6f290e83..0000000000 --- a/samples/tasks/scenarios/heat/create-and-delete-stack.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "HeatStacks.create_and_delete_stack": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/default.yaml.template" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 3 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/create-and-delete-stack.yaml b/samples/tasks/scenarios/heat/create-and-delete-stack.yaml deleted file mode 100644 index 64cafe6a42..0000000000 --- a/samples/tasks/scenarios/heat/create-and-delete-stack.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - HeatStacks.create_and_delete_stack: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/default.yaml.template" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 3 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/heat/create-and-list-stack.json b/samples/tasks/scenarios/heat/create-and-list-stack.json deleted file mode 100644 index b8c6f779a3..0000000000 --- a/samples/tasks/scenarios/heat/create-and-list-stack.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "HeatStacks.create_and_list_stack": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/default.yaml.template" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/create-and-list-stack.yaml b/samples/tasks/scenarios/heat/create-and-list-stack.yaml deleted file mode 100644 index 9c413159ba..0000000000 --- a/samples/tasks/scenarios/heat/create-and-list-stack.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - HeatStacks.create_and_list_stack: - - - 
args: - template_path: "samples/tasks/scenarios/heat/templates/default.yaml.template" - runner: - type: "constant" - times: 10 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/heat/create-check-delete-stack.json b/samples/tasks/scenarios/heat/create-check-delete-stack.json deleted file mode 100644 index 3b2d6808d1..0000000000 --- a/samples/tasks/scenarios/heat/create-check-delete-stack.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "HeatStacks.create_check_delete_stack": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/random-strings.yaml.template" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 3 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/create-check-delete-stack.yaml b/samples/tasks/scenarios/heat/create-check-delete-stack.yaml deleted file mode 100644 index fa5aaf25a0..0000000000 --- a/samples/tasks/scenarios/heat/create-check-delete-stack.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - HeatStacks.create_check_delete_stack: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/random-strings.yaml.template" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 3 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/heat/create-snapshot-restore-delete-stack.json b/samples/tasks/scenarios/heat/create-snapshot-restore-delete-stack.json deleted file mode 100644 index 24f3b02f6f..0000000000 --- a/samples/tasks/scenarios/heat/create-snapshot-restore-delete-stack.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "HeatStacks.create_snapshot_restore_delete_stack": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/random-strings.yaml.template" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/create-snapshot-restore-delete-stack.yaml b/samples/tasks/scenarios/heat/create-snapshot-restore-delete-stack.yaml deleted file mode 100644 index e181ecc912..0000000000 --- a/samples/tasks/scenarios/heat/create-snapshot-restore-delete-stack.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - HeatStacks.create_snapshot_restore_delete_stack: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/random-strings.yaml.template" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/heat/create-stack-and-list-output-resource-group.json b/samples/tasks/scenarios/heat/create-stack-and-list-output-resource-group.json deleted file mode 100644 index 95269cd07e..0000000000 --- a/samples/tasks/scenarios/heat/create-stack-and-list-output-resource-group.json +++ /dev/null @@ -1,49 +0,0 @@ -{ - "HeatStacks.create_stack_and_list_output": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/resource-group-with-outputs.yaml.template" - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ], - - 
"HeatStacks.create_stack_and_list_output_via_API": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/resource-group-with-outputs.yaml.template" - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/create-stack-and-list-output-resource-group.yaml b/samples/tasks/scenarios/heat/create-stack-and-list-output-resource-group.yaml deleted file mode 100644 index f47504ac71..0000000000 --- a/samples/tasks/scenarios/heat/create-stack-and-list-output-resource-group.yaml +++ /dev/null @@ -1,32 +0,0 @@ ---- - HeatStacks.create_stack_and_list_output: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/resource-group-with-outputs.yaml.template" - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - HeatStacks.create_stack_and_list_output_via_API: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/resource-group-with-outputs.yaml.template" - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/heat/create-stack-and-scale.json b/samples/tasks/scenarios/heat/create-stack-and-scale.json deleted file mode 100644 index 11ee82b5af..0000000000 --- a/samples/tasks/scenarios/heat/create-stack-and-scale.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "HeatStacks.create_stack_and_scale": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/autoscaling-group.yaml.template", - "output_key": "scaling_url", - "delta": 1 - }, - "runner": { - "type": "constant", - "concurrency": 2, - "times": 3 - }, - "context": { - "users": { - "users_per_tenant": 1, - "tenants": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/create-stack-and-scale.yaml b/samples/tasks/scenarios/heat/create-stack-and-scale.yaml deleted file mode 100644 index b3ae6ef599..0000000000 --- a/samples/tasks/scenarios/heat/create-stack-and-scale.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- - HeatStacks.create_stack_and_scale: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/autoscaling-group.yaml.template" - output_key: "scaling_url" - delta: 1 - runner: - type: "constant" - times: 3 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/heat/create-stack-and-show-output-resource-group.json b/samples/tasks/scenarios/heat/create-stack-and-show-output-resource-group.json deleted file mode 100644 index fbe50b2dee..0000000000 --- a/samples/tasks/scenarios/heat/create-stack-and-show-output-resource-group.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "HeatStacks.create_stack_and_show_output": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/resource-group-with-outputs.yaml.template", - "output_key": "val1" - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ], - - "HeatStacks.create_stack_and_show_output_via_API": [ - { - "args": { - "template_path": 
"samples/tasks/scenarios/heat/templates/resource-group-with-outputs.yaml.template", - "output_key": "val1" - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/create-stack-and-show-output-resource-group.yaml b/samples/tasks/scenarios/heat/create-stack-and-show-output-resource-group.yaml deleted file mode 100644 index 233f82cc08..0000000000 --- a/samples/tasks/scenarios/heat/create-stack-and-show-output-resource-group.yaml +++ /dev/null @@ -1,34 +0,0 @@ ---- - HeatStacks.create_stack_and_show_output: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/resource-group-with-outputs.yaml.template" - output_key: "val1" - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - HeatStacks.create_stack_and_show_output_via_API: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/resource-group-with-outputs.yaml.template" - output_key: "val1" - runner: - type: "constant" - times: 5 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/heat/create-suspend-resume-delete-stack.json b/samples/tasks/scenarios/heat/create-suspend-resume-delete-stack.json deleted file mode 100644 index 2c28366de9..0000000000 --- a/samples/tasks/scenarios/heat/create-suspend-resume-delete-stack.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "HeatStacks.create_suspend_resume_delete_stack": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/random-strings.yaml.template" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/create-suspend-resume-delete-stack.yaml b/samples/tasks/scenarios/heat/create-suspend-resume-delete-stack.yaml deleted file mode 100644 index f26f0afae2..0000000000 --- a/samples/tasks/scenarios/heat/create-suspend-resume-delete-stack.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - HeatStacks.create_suspend_resume_delete_stack: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/random-strings.yaml.template" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/heat/create-update-delete-stack-add-res.json b/samples/tasks/scenarios/heat/create-update-delete-stack-add-res.json deleted file mode 100644 index cd939b56f8..0000000000 --- a/samples/tasks/scenarios/heat/create-update-delete-stack-add-res.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "HeatStacks.create_update_delete_stack": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/random-strings.yaml.template", - "updated_template_path": "samples/tasks/scenarios/heat/templates/updated-random-strings-add.yaml.template" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 3 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/create-update-delete-stack-add-res.yaml 
b/samples/tasks/scenarios/heat/create-update-delete-stack-add-res.yaml deleted file mode 100644 index b3eec6a37e..0000000000 --- a/samples/tasks/scenarios/heat/create-update-delete-stack-add-res.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - HeatStacks.create_update_delete_stack: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/random-strings.yaml.template" - updated_template_path: "samples/tasks/scenarios/heat/templates/updated-random-strings-add.yaml.template" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 3 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/heat/create-update-delete-stack-del-res.json b/samples/tasks/scenarios/heat/create-update-delete-stack-del-res.json deleted file mode 100644 index f5b1f0ac38..0000000000 --- a/samples/tasks/scenarios/heat/create-update-delete-stack-del-res.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "HeatStacks.create_update_delete_stack": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/random-strings.yaml.template", - "updated_template_path": "samples/tasks/scenarios/heat/templates/updated-random-strings-delete.yaml.template" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 3 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/create-update-delete-stack-del-res.yaml b/samples/tasks/scenarios/heat/create-update-delete-stack-del-res.yaml deleted file mode 100644 index 2998e1b12d..0000000000 --- a/samples/tasks/scenarios/heat/create-update-delete-stack-del-res.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - HeatStacks.create_update_delete_stack: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/random-strings.yaml.template" - updated_template_path: "samples/tasks/scenarios/heat/templates/updated-random-strings-delete.yaml.template" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 3 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/heat/create-update-delete-stack-increase.json b/samples/tasks/scenarios/heat/create-update-delete-stack-increase.json deleted file mode 100644 index 3ee04aaede..0000000000 --- a/samples/tasks/scenarios/heat/create-update-delete-stack-increase.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "HeatStacks.create_update_delete_stack": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/resource-group.yaml.template", - "updated_template_path": "samples/tasks/scenarios/heat/templates/updated-resource-group-increase.yaml.template" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 3 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/create-update-delete-stack-increase.yaml b/samples/tasks/scenarios/heat/create-update-delete-stack-increase.yaml deleted file mode 100644 index 03a98aba40..0000000000 --- a/samples/tasks/scenarios/heat/create-update-delete-stack-increase.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - HeatStacks.create_update_delete_stack: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/resource-group.yaml.template" - updated_template_path: "samples/tasks/scenarios/heat/templates/updated-resource-group-increase.yaml.template" - runner: - type: "constant" - times: 10 
- concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 3 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/heat/create-update-delete-stack-inplace.json b/samples/tasks/scenarios/heat/create-update-delete-stack-inplace.json deleted file mode 100644 index 17d5ca883d..0000000000 --- a/samples/tasks/scenarios/heat/create-update-delete-stack-inplace.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "HeatStacks.create_update_delete_stack": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/autoscaling-policy.yaml.template", - "updated_template_path": "samples/tasks/scenarios/heat/templates/updated-autoscaling-policy-inplace.yaml.template" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 3 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/create-update-delete-stack-inplace.yaml b/samples/tasks/scenarios/heat/create-update-delete-stack-inplace.yaml deleted file mode 100644 index 82c4459bc7..0000000000 --- a/samples/tasks/scenarios/heat/create-update-delete-stack-inplace.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - HeatStacks.create_update_delete_stack: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/autoscaling-policy.yaml.template" - updated_template_path: "samples/tasks/scenarios/heat/templates/updated-autoscaling-policy-inplace.yaml.template" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 3 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/heat/create-update-delete-stack-reduce.json b/samples/tasks/scenarios/heat/create-update-delete-stack-reduce.json deleted file mode 100644 index 19cfa2e9c8..0000000000 --- a/samples/tasks/scenarios/heat/create-update-delete-stack-reduce.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "HeatStacks.create_update_delete_stack": [ - { - "args": { - "template_path": "samples/tasks/scenarios/heat/templates/resource-group.yaml.template", - "updated_template_path": "samples/tasks/scenarios/heat/templates/updated-resource-group-reduce.yaml.template" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 3 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/create-update-delete-stack-reduce.yaml b/samples/tasks/scenarios/heat/create-update-delete-stack-reduce.yaml deleted file mode 100644 index 9bb85a0c4b..0000000000 --- a/samples/tasks/scenarios/heat/create-update-delete-stack-reduce.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - HeatStacks.create_update_delete_stack: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/resource-group.yaml.template" - updated_template_path: "samples/tasks/scenarios/heat/templates/updated-resource-group-reduce.yaml.template" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 3 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/heat/create-update-delete-stack-replace.json b/samples/tasks/scenarios/heat/create-update-delete-stack-replace.json deleted file mode 100644 index c96f7d43f3..0000000000 --- a/samples/tasks/scenarios/heat/create-update-delete-stack-replace.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "HeatStacks.create_update_delete_stack": [ - { - "args": { - "template_path": 
"samples/tasks/scenarios/heat/templates/random-strings.yaml.template", - "updated_template_path": "samples/tasks/scenarios/heat/templates/updated-random-strings-replace.yaml.template" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 3 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/create-update-delete-stack-replace.yaml b/samples/tasks/scenarios/heat/create-update-delete-stack-replace.yaml deleted file mode 100644 index 8b34b5240d..0000000000 --- a/samples/tasks/scenarios/heat/create-update-delete-stack-replace.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - HeatStacks.create_update_delete_stack: - - - args: - template_path: "samples/tasks/scenarios/heat/templates/random-strings.yaml.template" - updated_template_path: "samples/tasks/scenarios/heat/templates/updated-random-strings-replace.yaml.template" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 3 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/heat/list-stack-and-event.json b/samples/tasks/scenarios/heat/list-stack-and-event.json deleted file mode 100644 index cc3f7863ba..0000000000 --- a/samples/tasks/scenarios/heat/list-stack-and-event.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "HeatStacks.list_stacks_and_events": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "stacks": { - "stacks_per_tenant": 2, - "resources_per_stack": 10 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/list-stack-and-event.yaml b/samples/tasks/scenarios/heat/list-stack-and-event.yaml deleted file mode 100644 index c7b592871e..0000000000 --- a/samples/tasks/scenarios/heat/list-stack-and-event.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - HeatStacks.list_stacks_and_events: - - - runner: - type: "constant" - times: 10 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - stacks: - stacks_per_tenant: 2 - resources_per_stack: 10 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/heat/list-stack-and-resources.json b/samples/tasks/scenarios/heat/list-stack-and-resources.json deleted file mode 100644 index 0b83591e02..0000000000 --- a/samples/tasks/scenarios/heat/list-stack-and-resources.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "HeatStacks.list_stacks_and_resources": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "stacks": { - "stacks_per_tenant": 2, - "resources_per_stack": 10 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/heat/list-stack-and-resources.yaml b/samples/tasks/scenarios/heat/list-stack-and-resources.yaml deleted file mode 100644 index 5b95509692..0000000000 --- a/samples/tasks/scenarios/heat/list-stack-and-resources.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - HeatStacks.list_stacks_and_resources: - - - runner: - type: "constant" - times: 10 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - stacks: - stacks_per_tenant: 2 - resources_per_stack: 10 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/heat/templates/autoscaling-group.yaml.template b/samples/tasks/scenarios/heat/templates/autoscaling-group.yaml.template 
deleted file mode 100644 index 6c9892b411..0000000000 --- a/samples/tasks/scenarios/heat/templates/autoscaling-group.yaml.template +++ /dev/null @@ -1,46 +0,0 @@ -heat_template_version: 2013-05-23 - -parameters: - flavor: - type: string - default: m1.tiny - constraints: - - custom_constraint: nova.flavor - image: - type: string - default: cirros-0.3.5-x86_64-disk - constraints: - - custom_constraint: glance.image - scaling_adjustment: - type: number - default: 1 - max_size: - type: number - default: 5 - constraints: - - range: {min: 1} - - -resources: - asg: - type: OS::Heat::AutoScalingGroup - properties: - resource: - type: OS::Nova::Server - properties: - image: { get_param: image } - flavor: { get_param: flavor } - min_size: 1 - desired_capacity: 3 - max_size: { get_param: max_size } - - scaling_policy: - type: OS::Heat::ScalingPolicy - properties: - adjustment_type: change_in_capacity - auto_scaling_group_id: {get_resource: asg} - scaling_adjustment: { get_param: scaling_adjustment } - -outputs: - scaling_url: - value: {get_attr: [scaling_policy, alarm_url]} diff --git a/samples/tasks/scenarios/heat/templates/autoscaling-policy.yaml.template b/samples/tasks/scenarios/heat/templates/autoscaling-policy.yaml.template deleted file mode 100644 index a22487e339..0000000000 --- a/samples/tasks/scenarios/heat/templates/autoscaling-policy.yaml.template +++ /dev/null @@ -1,17 +0,0 @@ -heat_template_version: 2013-05-23 - -resources: - test_group: - type: OS::Heat::AutoScalingGroup - properties: - desired_capacity: 0 - max_size: 0 - min_size: 0 - resource: - type: OS::Heat::RandomString - test_policy: - type: OS::Heat::ScalingPolicy - properties: - adjustment_type: change_in_capacity - auto_scaling_group_id: { get_resource: test_group } - scaling_adjustment: 1 \ No newline at end of file diff --git a/samples/tasks/scenarios/heat/templates/default.yaml.template b/samples/tasks/scenarios/heat/templates/default.yaml.template deleted file mode 100644 index eb4f2f2dd8..0000000000 --- a/samples/tasks/scenarios/heat/templates/default.yaml.template +++ /dev/null @@ -1 +0,0 @@ -heat_template_version: 2014-10-16 \ No newline at end of file diff --git a/samples/tasks/scenarios/heat/templates/random-strings.yaml.template b/samples/tasks/scenarios/heat/templates/random-strings.yaml.template deleted file mode 100644 index 2dd676c118..0000000000 --- a/samples/tasks/scenarios/heat/templates/random-strings.yaml.template +++ /dev/null @@ -1,13 +0,0 @@ -heat_template_version: 2014-10-16 - -description: Test template for rally create-update-delete scenario - -resources: - test_string_one: - type: OS::Heat::RandomString - properties: - length: 20 - test_string_two: - type: OS::Heat::RandomString - properties: - length: 20 \ No newline at end of file diff --git a/samples/tasks/scenarios/heat/templates/resource-group-server-with-volume.yaml.template b/samples/tasks/scenarios/heat/templates/resource-group-server-with-volume.yaml.template deleted file mode 100644 index 4a15ca89c6..0000000000 --- a/samples/tasks/scenarios/heat/templates/resource-group-server-with-volume.yaml.template +++ /dev/null @@ -1,44 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - Test template that creates a resource group with servers and volumes. 
- The template allows to create a lot of nested stacks with standard - configuration: nova instance, cinder volume attached to that instance - -parameters: - - num_instances: - type: number - description: number of instances that should be created in resource group - constraints: - - range: {min: 1} - instance_image: - type: string - default: cirros-0.3.5-x86_64-disk - instance_volume_size: - type: number - description: Size of volume to attach to instance - default: 1 - constraints: - - range: {min: 1, max: 1024} - instance_flavor: - type: string - description: Type of the instance to be created. - default: m1.tiny - instance_availability_zone: - type: string - description: The Availability Zone to launch the instance. - default: nova - -resources: - group_of_volumes: - type: OS::Heat::ResourceGroup - properties: - count: {get_param: num_instances} - resource_def: - type: templates/server-with-volume.yaml.template - properties: - image: {get_param: instance_image} - volume_size: {get_param: instance_volume_size} - flavor: {get_param: instance_flavor} - availability_zone: {get_param: instance_availability_zone} diff --git a/samples/tasks/scenarios/heat/templates/resource-group-with-constraint.yaml.template b/samples/tasks/scenarios/heat/templates/resource-group-with-constraint.yaml.template deleted file mode 100644 index 234e4237ff..0000000000 --- a/samples/tasks/scenarios/heat/templates/resource-group-with-constraint.yaml.template +++ /dev/null @@ -1,21 +0,0 @@ -heat_template_version: 2013-05-23 - -description: Template for testing caching. - -parameters: - count: - type: number - default: 40 - delay: - type: number - default: 0.1 - -resources: - rg: - type: OS::Heat::ResourceGroup - properties: - count: {get_param: count} - resource_def: - type: OS::Heat::TestResource - properties: - constraint_prop_secs: {get_param: delay} diff --git a/samples/tasks/scenarios/heat/templates/resource-group-with-outputs.yaml.template b/samples/tasks/scenarios/heat/templates/resource-group-with-outputs.yaml.template deleted file mode 100644 index f47d03ccc1..0000000000 --- a/samples/tasks/scenarios/heat/templates/resource-group-with-outputs.yaml.template +++ /dev/null @@ -1,37 +0,0 @@ -heat_template_version: 2013-05-23 -parameters: - attr_wait_secs: - type: number - default: 0.5 - -resources: - rg: - type: OS::Heat::ResourceGroup - properties: - count: 10 - resource_def: - type: OS::Heat::TestResource - properties: - attr_wait_secs: {get_param: attr_wait_secs} - -outputs: - val1: - value: {get_attr: [rg, resource.0.output]} - val2: - value: {get_attr: [rg, resource.1.output]} - val3: - value: {get_attr: [rg, resource.2.output]} - val4: - value: {get_attr: [rg, resource.3.output]} - val5: - value: {get_attr: [rg, resource.4.output]} - val6: - value: {get_attr: [rg, resource.5.output]} - val7: - value: {get_attr: [rg, resource.6.output]} - val8: - value: {get_attr: [rg, resource.7.output]} - val9: - value: {get_attr: [rg, resource.8.output]} - val10: - value: {get_attr: [rg, resource.9.output]} \ No newline at end of file diff --git a/samples/tasks/scenarios/heat/templates/resource-group.yaml.template b/samples/tasks/scenarios/heat/templates/resource-group.yaml.template deleted file mode 100644 index b3f505fa67..0000000000 --- a/samples/tasks/scenarios/heat/templates/resource-group.yaml.template +++ /dev/null @@ -1,13 +0,0 @@ -heat_template_version: 2014-10-16 - -description: Test template for rally create-update-delete scenario - -resources: - test_group: - type: OS::Heat::ResourceGroup - properties: - 
count: 2 - resource_def: - type: OS::Heat::RandomString - properties: - length: 20 \ No newline at end of file diff --git a/samples/tasks/scenarios/heat/templates/server-with-ports.yaml.template b/samples/tasks/scenarios/heat/templates/server-with-ports.yaml.template deleted file mode 100644 index 0e344fc069..0000000000 --- a/samples/tasks/scenarios/heat/templates/server-with-ports.yaml.template +++ /dev/null @@ -1,64 +0,0 @@ -heat_template_version: 2013-05-23 - -parameters: - # set all correct defaults for parameters before launch test - public_net: - type: string - default: public - image: - type: string - default: cirros-0.3.5-x86_64-disk - flavor: - type: string - default: m1.tiny - cidr: - type: string - default: 11.11.11.0/24 - -resources: - server: - type: OS::Nova::Server - properties: - image: {get_param: image} - flavor: {get_param: flavor} - networks: - - port: { get_resource: server_port } - - router: - type: OS::Neutron::Router - properties: - external_gateway_info: - network: {get_param: public_net} - - router_interface: - type: OS::Neutron::RouterInterface - properties: - router_id: { get_resource: router } - subnet_id: { get_resource: private_subnet } - - private_net: - type: OS::Neutron::Net - - private_subnet: - type: OS::Neutron::Subnet - properties: - network: { get_resource: private_net } - cidr: {get_param: cidr} - - port_security_group: - type: OS::Neutron::SecurityGroup - properties: - name: default_port_security_group - description: > - Default security group assigned to port. The neutron default group is not - used because neutron creates several groups with the same name=default and - nova cannot chooses which one should it use. - - server_port: - type: OS::Neutron::Port - properties: - network: {get_resource: private_net} - fixed_ips: - - subnet: { get_resource: private_subnet } - security_groups: - - { get_resource: port_security_group } diff --git a/samples/tasks/scenarios/heat/templates/server-with-volume.yaml.template b/samples/tasks/scenarios/heat/templates/server-with-volume.yaml.template deleted file mode 100644 index 6e65cec720..0000000000 --- a/samples/tasks/scenarios/heat/templates/server-with-volume.yaml.template +++ /dev/null @@ -1,39 +0,0 @@ -heat_template_version: 2013-05-23 - -parameters: - # set all correct defaults for parameters before launch test - image: - type: string - default: cirros-0.3.5-x86_64-disk - flavor: - type: string - default: m1.tiny - availability_zone: - type: string - description: The Availability Zone to launch the instance. - default: nova - volume_size: - type: number - description: Size of the volume to be created. - default: 1 - constraints: - - range: { min: 1, max: 1024 } - description: must be between 1 and 1024 Gb. 
- -resources: - server: - type: OS::Nova::Server - properties: - image: {get_param: image} - flavor: {get_param: flavor} - cinder_volume: - type: OS::Cinder::Volume - properties: - size: { get_param: volume_size } - availability_zone: { get_param: availability_zone } - volume_attachment: - type: OS::Cinder::VolumeAttachment - properties: - volume_id: { get_resource: cinder_volume } - instance_uuid: { get_resource: server} - mountpoint: /dev/vdc diff --git a/samples/tasks/scenarios/heat/templates/updated-autoscaling-policy-inplace.yaml.template b/samples/tasks/scenarios/heat/templates/updated-autoscaling-policy-inplace.yaml.template deleted file mode 100644 index cf34879ca7..0000000000 --- a/samples/tasks/scenarios/heat/templates/updated-autoscaling-policy-inplace.yaml.template +++ /dev/null @@ -1,23 +0,0 @@ -heat_template_version: 2013-05-23 - -description: > - Test template for create-update-delete-stack scenario in rally. - The template updates resource parameters without resource re-creation(replacement) - in the stack defined by autoscaling_policy.yaml.template. It allows to measure - performance of "pure" resource update operation only. - -resources: - test_group: - type: OS::Heat::AutoScalingGroup - properties: - desired_capacity: 0 - max_size: 0 - min_size: 0 - resource: - type: OS::Heat::RandomString - test_policy: - type: OS::Heat::ScalingPolicy - properties: - adjustment_type: change_in_capacity - auto_scaling_group_id: { get_resource: test_group } - scaling_adjustment: -1 \ No newline at end of file diff --git a/samples/tasks/scenarios/heat/templates/updated-random-strings-add.yaml.template b/samples/tasks/scenarios/heat/templates/updated-random-strings-add.yaml.template deleted file mode 100644 index 03f9a885d5..0000000000 --- a/samples/tasks/scenarios/heat/templates/updated-random-strings-add.yaml.template +++ /dev/null @@ -1,19 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - Test template for create-update-delete-stack scenario in rally. - The template updates the stack defined by random-strings.yaml.template with additional resource. - -resources: - test_string_one: - type: OS::Heat::RandomString - properties: - length: 20 - test_string_two: - type: OS::Heat::RandomString - properties: - length: 20 - test_string_three: - type: OS::Heat::RandomString - properties: - length: 20 diff --git a/samples/tasks/scenarios/heat/templates/updated-random-strings-delete.yaml.template b/samples/tasks/scenarios/heat/templates/updated-random-strings-delete.yaml.template deleted file mode 100644 index 414d90d583..0000000000 --- a/samples/tasks/scenarios/heat/templates/updated-random-strings-delete.yaml.template +++ /dev/null @@ -1,11 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - Test template for create-update-delete-stack scenario in rally. - The template deletes one resource from the stack defined by random-strings.yaml.template. - -resources: - test_string_one: - type: OS::Heat::RandomString - properties: - length: 20 diff --git a/samples/tasks/scenarios/heat/templates/updated-random-strings-replace.yaml.template b/samples/tasks/scenarios/heat/templates/updated-random-strings-replace.yaml.template deleted file mode 100644 index 780fcc168e..0000000000 --- a/samples/tasks/scenarios/heat/templates/updated-random-strings-replace.yaml.template +++ /dev/null @@ -1,19 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - Test template for create-update-delete-stack scenario in rally. 
- The template deletes one resource from the stack defined by - random-strings.yaml.template and re-creates it with the updated parameters - (so-called update-replace). That happens because some parameters cannot be - changed without resource re-creation. The template allows to measure performance - of update-replace operation. - -resources: - test_string_one: - type: OS::Heat::RandomString - properties: - length: 20 - test_string_two: - type: OS::Heat::RandomString - properties: - length: 40 diff --git a/samples/tasks/scenarios/heat/templates/updated-resource-group-increase.yaml.template b/samples/tasks/scenarios/heat/templates/updated-resource-group-increase.yaml.template deleted file mode 100644 index 94bc271f79..0000000000 --- a/samples/tasks/scenarios/heat/templates/updated-resource-group-increase.yaml.template +++ /dev/null @@ -1,16 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - Test template for create-update-delete-stack scenario in rally. - The template updates one resource from the stack defined by resource-group.yaml.template - and adds children resources to that resource. - -resources: - test_group: - type: OS::Heat::ResourceGroup - properties: - count: 3 - resource_def: - type: OS::Heat::RandomString - properties: - length: 20 diff --git a/samples/tasks/scenarios/heat/templates/updated-resource-group-reduce.yaml.template b/samples/tasks/scenarios/heat/templates/updated-resource-group-reduce.yaml.template deleted file mode 100644 index a076224a80..0000000000 --- a/samples/tasks/scenarios/heat/templates/updated-resource-group-reduce.yaml.template +++ /dev/null @@ -1,16 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - Test template for create-update-delete-stack scenario in rally. - The template updates one resource from the stack defined by resource-group.yaml.template - and deletes children resources from that resource. 
- -resources: - test_group: - type: OS::Heat::ResourceGroup - properties: - count: 1 - resource_def: - type: OS::Heat::RandomString - properties: - length: 20 diff --git a/samples/tasks/scenarios/ironic/create-and-delete-node.json b/samples/tasks/scenarios/ironic/create-and-delete-node.json deleted file mode 100644 index f68d16192b..0000000000 --- a/samples/tasks/scenarios/ironic/create-and-delete-node.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "IronicNodes.create_and_delete_node": [ - { - "args": { - "driver": "pxe_ssh", - "properties": { - "capabilities": "boot_option:local" - } - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 5, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/ironic/create-and-delete-node.yaml b/samples/tasks/scenarios/ironic/create-and-delete-node.yaml deleted file mode 100644 index 579d49ce65..0000000000 --- a/samples/tasks/scenarios/ironic/create-and-delete-node.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- - IronicNodes.create_and_delete_node: - - - args: - driver: "pxe_ssh" - properties: - capabilities: "boot_option:local" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 5 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/ironic/create-and-list-node.json b/samples/tasks/scenarios/ironic/create-and-list-node.json deleted file mode 100644 index d6e85b70c4..0000000000 --- a/samples/tasks/scenarios/ironic/create-and-list-node.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "IronicNodes.create_and_list_node": [ - { - "args": { - "driver": "pxe_ssh" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 5, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/ironic/create-and-list-node.yaml b/samples/tasks/scenarios/ironic/create-and-list-node.yaml deleted file mode 100644 index a5aa419b51..0000000000 --- a/samples/tasks/scenarios/ironic/create-and-list-node.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - IronicNodes.create_and_list_node: - - - args: - driver: "pxe_ssh" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 5 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/keystone/add-and-remove-user-role.json b/samples/tasks/scenarios/keystone/add-and-remove-user-role.json deleted file mode 100644 index ba8625282e..0000000000 --- a/samples/tasks/scenarios/keystone/add-and-remove-user-role.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "KeystoneBasic.add_and_remove_user_role": [ - { - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/keystone/add-and-remove-user-role.yaml b/samples/tasks/scenarios/keystone/add-and-remove-user-role.yaml deleted file mode 100644 index a58ee20de1..0000000000 --- a/samples/tasks/scenarios/keystone/add-and-remove-user-role.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - KeystoneBasic.add_and_remove_user_role: - - - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git 
a/samples/tasks/scenarios/keystone/authenticate-user-and-validate-token.json b/samples/tasks/scenarios/keystone/authenticate-user-and-validate-token.json deleted file mode 100644 index 564bc897df..0000000000 --- a/samples/tasks/scenarios/keystone/authenticate-user-and-validate-token.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "KeystoneBasic.authenticate_user_and_validate_token": [ - { - "args": {}, - "runner": { - "type": "constant", - "times": 20, - "concurrency": 5 - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/keystone/authenticate-user-and-validate-token.yaml b/samples/tasks/scenarios/keystone/authenticate-user-and-validate-token.yaml deleted file mode 100644 index 86e86ca996..0000000000 --- a/samples/tasks/scenarios/keystone/authenticate-user-and-validate-token.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- - KeystoneBasic.authenticate_user_and_validate_token: - - - args: {} - runner: - type: "constant" - times: 20 - concurrency: 5 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/keystone/create-add-and-list-user-roles.json b/samples/tasks/scenarios/keystone/create-add-and-list-user-roles.json deleted file mode 100644 index 87890e99c3..0000000000 --- a/samples/tasks/scenarios/keystone/create-add-and-list-user-roles.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "KeystoneBasic.create_add_and_list_user_roles": [ - { - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/keystone/create-add-and-list-user-roles.yaml b/samples/tasks/scenarios/keystone/create-add-and-list-user-roles.yaml deleted file mode 100644 index c9d0443ca0..0000000000 --- a/samples/tasks/scenarios/keystone/create-add-and-list-user-roles.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - KeystoneBasic.create_add_and_list_user_roles: - - - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/keystone/create-and-delete-ec2credential.json b/samples/tasks/scenarios/keystone/create-and-delete-ec2credential.json deleted file mode 100644 index 4841c279f7..0000000000 --- a/samples/tasks/scenarios/keystone/create-and-delete-ec2credential.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "KeystoneBasic.create_and_delete_ec2credential": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/keystone/create-and-delete-ec2credential.yaml b/samples/tasks/scenarios/keystone/create-and-delete-ec2credential.yaml deleted file mode 100644 index 617df34813..0000000000 --- a/samples/tasks/scenarios/keystone/create-and-delete-ec2credential.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- - KeystoneBasic.create_and_delete_ec2credential: - - - - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/keystone/create-and-delete-role.json b/samples/tasks/scenarios/keystone/create-and-delete-role.json deleted file mode 100644 index 5dad7e0676..0000000000 --- a/samples/tasks/scenarios/keystone/create-and-delete-role.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - 
"KeystoneBasic.create_and_delete_role": [ - { - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/keystone/create-and-delete-role.yaml b/samples/tasks/scenarios/keystone/create-and-delete-role.yaml deleted file mode 100644 index 2a8495444e..0000000000 --- a/samples/tasks/scenarios/keystone/create-and-delete-role.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- - KeystoneBasic.create_and_delete_role: - - - runner: - type: "constant" - times: 100 - concurrency: 10 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/keystone/create-and-delete-service.json b/samples/tasks/scenarios/keystone/create-and-delete-service.json deleted file mode 100644 index d5f5a2561a..0000000000 --- a/samples/tasks/scenarios/keystone/create-and-delete-service.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "KeystoneBasic.create_and_delete_service": [ - { - "args": { - "service_type": "Rally_test_type", - "description": "test_description" - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/keystone/create-and-delete-service.yaml b/samples/tasks/scenarios/keystone/create-and-delete-service.yaml deleted file mode 100644 index 42c213aaa6..0000000000 --- a/samples/tasks/scenarios/keystone/create-and-delete-service.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- - KeystoneBasic.create_and_delete_service: - - - args: - service_type: "Rally_test_type" - description: "test_description" - runner: - type: "constant" - times: 100 - concurrency: 10 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/keystone/create-and-delete-user.json b/samples/tasks/scenarios/keystone/create-and-delete-user.json deleted file mode 100644 index b1a487ec21..0000000000 --- a/samples/tasks/scenarios/keystone/create-and-delete-user.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "KeystoneBasic.create_delete_user": [ - { - "args": {}, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/keystone/create-and-delete-user.yaml b/samples/tasks/scenarios/keystone/create-and-delete-user.yaml deleted file mode 100644 index 00c4825173..0000000000 --- a/samples/tasks/scenarios/keystone/create-and-delete-user.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- - KeystoneBasic.create_delete_user: - - - args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/keystone/create-and-get-role.json b/samples/tasks/scenarios/keystone/create-and-get-role.json deleted file mode 100644 index e2c7b3a59b..0000000000 --- a/samples/tasks/scenarios/keystone/create-and-get-role.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "KeystoneBasic.create_and_get_role": [ - { - "args": {}, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/keystone/create-and-get-role.yaml b/samples/tasks/scenarios/keystone/create-and-get-role.yaml deleted file mode 100644 index 88cd1226da..0000000000 --- a/samples/tasks/scenarios/keystone/create-and-get-role.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- - KeystoneBasic.create_and_get_role: - - - args: {} - runner: - type: 
"constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/keystone/create-and-list-ec2credentials.json b/samples/tasks/scenarios/keystone/create-and-list-ec2credentials.json deleted file mode 100644 index f899771fb8..0000000000 --- a/samples/tasks/scenarios/keystone/create-and-list-ec2credentials.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "KeystoneBasic.create_and_list_ec2credentials": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/keystone/create-and-list-ec2credentials.yaml b/samples/tasks/scenarios/keystone/create-and-list-ec2credentials.yaml deleted file mode 100644 index 11ef418564..0000000000 --- a/samples/tasks/scenarios/keystone/create-and-list-ec2credentials.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- - KeystoneBasic.create_and_list_ec2credentials: - - - - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/keystone/create-and-list-roles.json b/samples/tasks/scenarios/keystone/create-and-list-roles.json deleted file mode 100644 index 6633a3ac1f..0000000000 --- a/samples/tasks/scenarios/keystone/create-and-list-roles.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "KeystoneBasic.create_and_list_roles": [ - { - "args": { - "create_role_kwargs": {}, - "list_role_kwargs": {} - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/keystone/create-and-list-roles.yaml b/samples/tasks/scenarios/keystone/create-and-list-roles.yaml deleted file mode 100644 index 09dce1f868..0000000000 --- a/samples/tasks/scenarios/keystone/create-and-list-roles.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - KeystoneBasic.create_and_list_roles: - - - args: - create_role_kwargs: {} - list_role_kwargs: {} - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/keystone/create-and-list-services.json b/samples/tasks/scenarios/keystone/create-and-list-services.json deleted file mode 100644 index b08fef8e40..0000000000 --- a/samples/tasks/scenarios/keystone/create-and-list-services.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "KeystoneBasic.create_and_list_services": [ - { - "args": { - "service_type": "Rally_test_type", - "description": "test_description" - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/keystone/create-and-list-services.yaml b/samples/tasks/scenarios/keystone/create-and-list-services.yaml deleted file mode 100644 index f1b48f1ac1..0000000000 --- a/samples/tasks/scenarios/keystone/create-and-list-services.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- - KeystoneBasic.create_and_list_services: - - - args: - service_type: "Rally_test_type" - description: "test_description" - runner: - type: "constant" - times: 100 - concurrency: 10 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/keystone/create-and-list-tenants.json 
b/samples/tasks/scenarios/keystone/create-and-list-tenants.json deleted file mode 100644 index 4bd1637677..0000000000 --- a/samples/tasks/scenarios/keystone/create-and-list-tenants.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "KeystoneBasic.create_and_list_tenants": [ - { - "args": {}, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 1 - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/keystone/create-and-list-tenants.yaml b/samples/tasks/scenarios/keystone/create-and-list-tenants.yaml deleted file mode 100644 index d9610eee55..0000000000 --- a/samples/tasks/scenarios/keystone/create-and-list-tenants.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- - KeystoneBasic.create_and_list_tenants: - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/keystone/create-and-list-users.json b/samples/tasks/scenarios/keystone/create-and-list-users.json deleted file mode 100644 index b1e581226e..0000000000 --- a/samples/tasks/scenarios/keystone/create-and-list-users.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "KeystoneBasic.create_and_list_users": [ - { - "args": {}, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/keystone/create-and-list-users.yaml b/samples/tasks/scenarios/keystone/create-and-list-users.yaml deleted file mode 100644 index 5489af7da5..0000000000 --- a/samples/tasks/scenarios/keystone/create-and-list-users.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- - KeystoneBasic.create_and_list_users: - - - args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/keystone/create-and-update-user.json b/samples/tasks/scenarios/keystone/create-and-update-user.json deleted file mode 100644 index 04a74de468..0000000000 --- a/samples/tasks/scenarios/keystone/create-and-update-user.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "KeystoneBasic.create_and_update_user": [ - { - "args": { - "create_user_kwargs": {}, - "update_user_kwargs": { - "enabled": false - } - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/keystone/create-and-update-user.yaml b/samples/tasks/scenarios/keystone/create-and-update-user.yaml deleted file mode 100644 index d9a7d1b94b..0000000000 --- a/samples/tasks/scenarios/keystone/create-and-update-user.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- - KeystoneBasic.create_and_update_user: - - - args: - create_user_kwargs: {} - update_user_kwargs: - enabled: false - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/keystone/create-tenant-with-users.json b/samples/tasks/scenarios/keystone/create-tenant-with-users.json deleted file mode 100644 index 83b0d9d098..0000000000 --- a/samples/tasks/scenarios/keystone/create-tenant-with-users.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "KeystoneBasic.create_tenant_with_users": [ - { - "args": { - "users_per_tenant": 10 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 10 - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git 
a/samples/tasks/scenarios/keystone/create-tenant-with-users.yaml b/samples/tasks/scenarios/keystone/create-tenant-with-users.yaml deleted file mode 100644 index 9afe4db096..0000000000 --- a/samples/tasks/scenarios/keystone/create-tenant-with-users.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- - KeystoneBasic.create_tenant_with_users: - - - args: - users_per_tenant: 10 - runner: - type: "constant" - times: 10 - concurrency: 10 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/keystone/create-tenant.json b/samples/tasks/scenarios/keystone/create-tenant.json deleted file mode 100644 index e2acdc32a4..0000000000 --- a/samples/tasks/scenarios/keystone/create-tenant.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "KeystoneBasic.create_tenant": [ - { - "args": {}, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/keystone/create-tenant.yaml b/samples/tasks/scenarios/keystone/create-tenant.yaml deleted file mode 100644 index e903719355..0000000000 --- a/samples/tasks/scenarios/keystone/create-tenant.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- - KeystoneBasic.create_tenant: - - - args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/keystone/create-update-and-delete-tenant.json b/samples/tasks/scenarios/keystone/create-update-and-delete-tenant.json deleted file mode 100644 index ce93594eb5..0000000000 --- a/samples/tasks/scenarios/keystone/create-update-and-delete-tenant.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "KeystoneBasic.create_update_and_delete_tenant": [ - { - "args": {}, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/keystone/create-update-and-delete-tenant.yaml b/samples/tasks/scenarios/keystone/create-update-and-delete-tenant.yaml deleted file mode 100644 index 122a65a805..0000000000 --- a/samples/tasks/scenarios/keystone/create-update-and-delete-tenant.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- - KeystoneBasic.create_update_and_delete_tenant: - - - args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/keystone/create-user-set-enabled-and-delete.json b/samples/tasks/scenarios/keystone/create-user-set-enabled-and-delete.json deleted file mode 100644 index 309446baa3..0000000000 --- a/samples/tasks/scenarios/keystone/create-user-set-enabled-and-delete.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "KeystoneBasic.create_user_set_enabled_and_delete": [ - { - "args": { - "enabled": true - }, - "runner": { - "type": "constant", - "concurrency": 10, - "times": 100 - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - }, - { - "args": { - "enabled": false - }, - "runner": { - "type": "constant", - "concurrency": 10, - "times": 100 - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/keystone/create-user-set-enabled-and-delete.yaml b/samples/tasks/scenarios/keystone/create-user-set-enabled-and-delete.yaml deleted file mode 100644 index cfc9087932..0000000000 --- a/samples/tasks/scenarios/keystone/create-user-set-enabled-and-delete.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- - KeystoneBasic.create_user_set_enabled_and_delete: - - - args: - enabled: true - runner: - type: "constant" - times: 100 - concurrency: 10 - sla: - failure_rate: - 
max: 0 - - - args: - enabled: false - runner: - type: "constant" - times: 100 - concurrency: 10 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/keystone/create-user-update-password.json b/samples/tasks/scenarios/keystone/create-user-update-password.json deleted file mode 100644 index 06ef173f5e..0000000000 --- a/samples/tasks/scenarios/keystone/create-user-update-password.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "KeystoneBasic.create_user_update_password": [ - { - "args": {}, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/keystone/create-user-update-password.yaml b/samples/tasks/scenarios/keystone/create-user-update-password.yaml deleted file mode 100644 index 6bbddb6edc..0000000000 --- a/samples/tasks/scenarios/keystone/create-user-update-password.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- - KeystoneBasic.create_user_update_password: - - - args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/keystone/create-user.json b/samples/tasks/scenarios/keystone/create-user.json deleted file mode 100644 index 0d1b44fe8c..0000000000 --- a/samples/tasks/scenarios/keystone/create-user.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "KeystoneBasic.create_user": [ - { - "args": {}, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/keystone/create-user.yaml b/samples/tasks/scenarios/keystone/create-user.yaml deleted file mode 100644 index 8f939f2027..0000000000 --- a/samples/tasks/scenarios/keystone/create-user.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- - KeystoneBasic.create_user: - - - args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/keystone/get-entities.json b/samples/tasks/scenarios/keystone/get-entities.json deleted file mode 100644 index 2d2db92afe..0000000000 --- a/samples/tasks/scenarios/keystone/get-entities.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "KeystoneBasic.get_entities": [ - { - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/keystone/get-entities.yaml b/samples/tasks/scenarios/keystone/get-entities.yaml deleted file mode 100644 index b4692b1ffa..0000000000 --- a/samples/tasks/scenarios/keystone/get-entities.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- - KeystoneBasic.get_entities: - - - runner: - type: "constant" - times: 100 - concurrency: 10 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/magnum/artifacts/nginx.yaml.k8s b/samples/tasks/scenarios/magnum/artifacts/nginx.yaml.k8s deleted file mode 100644 index a26e23a77d..0000000000 --- a/samples/tasks/scenarios/magnum/artifacts/nginx.yaml.k8s +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: nginx-1 - labels: - app: nginx-1 -spec: - containers: - - name: nginx-1 - image: nginx - ports: - - containerPort: 80 diff --git a/samples/tasks/scenarios/magnum/artifacts/rc_nginx.yaml.k8s b/samples/tasks/scenarios/magnum/artifacts/rc_nginx.yaml.k8s deleted file mode 100644 index 013a7a041f..0000000000 --- a/samples/tasks/scenarios/magnum/artifacts/rc_nginx.yaml.k8s +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: v1 -kind: ReplicationController -metadata: - 
name: nginx-controller -spec: - replicas: 2 - # selector identifies the set of pods that this - # replication controller is responsible for managing - selector: - name: nginx - # template defines the 'cookie cutter' used for creating - # new pods when necessary - template: - metadata: - labels: - # Important: these labels need to match the selector above - # The api server enforces this constraint. - name: nginx - spec: - containers: - - name: nginx - image: nginx - ports: - - containerPort: 80 diff --git a/samples/tasks/scenarios/magnum/create-and-list-clusters.json b/samples/tasks/scenarios/magnum/create-and-list-clusters.json deleted file mode 100644 index 6df0cc50ff..0000000000 --- a/samples/tasks/scenarios/magnum/create-and-list-clusters.json +++ /dev/null @@ -1,93 +0,0 @@ -{ - "MagnumClusters.create_and_list_clusters": [ - { - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "args": { - "node_count": 1 - }, - "context": { - "cluster_templates": { - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "docker_volume_size": 5, - "coe": "kubernetes", - "image_id": "fedora-atomic-latest", - "network_driver": "flannel" - }, - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - }, - { - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "args": { - "node_count": 1 - }, - "context": { - "cluster_templates": { - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "docker_volume_size": 5, - "coe": "swarm", - "image_id": "fedora-atomic-latest", - "network_driver": "docker" - }, - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - }, - { - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "args": { - "node_count": 1 - }, - "context": { - "cluster_templates": { - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "coe": "mesos", - "image_id": "ubuntu-mesos", - "network_driver": "docker" - }, - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/magnum/create-and-list-clusters.yaml b/samples/tasks/scenarios/magnum/create-and-list-clusters.yaml deleted file mode 100644 index f83095e5e3..0000000000 --- a/samples/tasks/scenarios/magnum/create-and-list-clusters.yaml +++ /dev/null @@ -1,67 +0,0 @@ ---- - MagnumClusters.create_and_list_clusters: - - - args: - node_count: 1 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - docker_volume_size: 5 - network_driver: "flannel" - coe: "kubernetes" - sla: - failure_rate: - max: 0 - - - args: - node_count: 1 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - docker_volume_size: 5 - network_driver: "docker" - coe: "swarm" - sla: - failure_rate: - max: 0 - - - args: - node_count: 1 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "ubuntu-mesos" - 
external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - network_driver: "docker" - coe: "mesos" - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/magnum/create-pods.json b/samples/tasks/scenarios/magnum/create-pods.json deleted file mode 100644 index 5af43a28b3..0000000000 --- a/samples/tasks/scenarios/magnum/create-pods.json +++ /dev/null @@ -1,74 +0,0 @@ -{ - "K8sPods.create_pods": [ - { - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "args": { - "manifests": ["artifacts/nginx.yaml.k8s"] - }, - "context": { - "users": { - "users_per_tenant": 1, - "tenants": 1 - }, - "cluster_templates": { - "docker_volume_size": 5, - "coe": "kubernetes", - "image_id": "fedora-atomic-latest", - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "network_driver": "flannel" - }, - "clusters": { - "node_count": 2 - }, - "ca_certs": { - "directory": "/home/stack" - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - }, - { - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "args": { - "manifests": ["artifacts/nginx.yaml.k8s"] - }, - "context": { - "users": { - "users_per_tenant": 1, - "tenants": 1 - }, - "cluster_templates": { - "docker_volume_size": 5, - "coe": "kubernetes", - "image_id": "fedora-atomic-latest", - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "network_driver": "flannel", - "tls_disabled": true - }, - "clusters": { - "node_count": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/magnum/create-pods.yaml b/samples/tasks/scenarios/magnum/create-pods.yaml deleted file mode 100644 index 763291f307..0000000000 --- a/samples/tasks/scenarios/magnum/create-pods.yaml +++ /dev/null @@ -1,53 +0,0 @@ ---- - K8sPods.create_pods: - - - args: - manifests: ["artifacts/nginx.yaml.k8s"] - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - docker_volume_size: 5 - network_driver: "flannel" - coe: "kubernetes" - clusters: - node_count: 2 - ca_certs: - directory: "/home/stack" - sla: - failure_rate: - max: 0 - - - args: - manifests: ["artifacts/nginx.yaml.k8s"] - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - docker_volume_size: 5 - network_driver: "flannel" - coe: "kubernetes" - tls_disabled: True - clusters: - node_count: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/magnum/create-rcs.json b/samples/tasks/scenarios/magnum/create-rcs.json deleted file mode 100644 index afc2745018..0000000000 --- a/samples/tasks/scenarios/magnum/create-rcs.json +++ /dev/null @@ -1,74 +0,0 @@ -{ - "K8sPods.create_rcs": [ - { - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "args": { - "manifests": ["artifacts/rc_nginx.yaml.k8s"] - }, - "context": { - "users": { - "users_per_tenant": 1, - "tenants": 1 - }, - "cluster_templates": { - "docker_volume_size": 5, - "coe": "kubernetes", - "image_id": "fedora-atomic-latest", - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "network_driver": 
"flannel" - }, - "clusters": { - "node_count": 2 - }, - "ca_certs": { - "directory": "/home/stack" - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - }, - { - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "args": { - "manifests": ["artifacts/rc_nginx.yaml.k8s"] - }, - "context": { - "users": { - "users_per_tenant": 1, - "tenants": 1 - }, - "cluster_templates": { - "docker_volume_size": 5, - "coe": "kubernetes", - "image_id": "fedora-atomic-latest", - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "network_driver": "flannel", - "tls_disabled": true - }, - "clusters": { - "node_count": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/magnum/create-rcs.yaml b/samples/tasks/scenarios/magnum/create-rcs.yaml deleted file mode 100644 index 5df551c349..0000000000 --- a/samples/tasks/scenarios/magnum/create-rcs.yaml +++ /dev/null @@ -1,53 +0,0 @@ ---- - K8sPods.create_rcs: - - - args: - manifests: ["artifacts/rc_nginx.yaml.k8s"] - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - docker_volume_size: 5 - network_driver: "flannel" - coe: "kubernetes" - clusters: - node_count: 2 - ca_certs: - directory: "/home/stack" - sla: - failure_rate: - max: 0 - - - args: - manifests: ["artifacts/rc_nginx.yaml.k8s"] - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - docker_volume_size: 5 - network_driver: "flannel" - coe: "kubernetes" - tls_disabled: True - clusters: - node_count: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/magnum/list-cluster-templates.json b/samples/tasks/scenarios/magnum/list-cluster-templates.json deleted file mode 100644 index 125753190d..0000000000 --- a/samples/tasks/scenarios/magnum/list-cluster-templates.json +++ /dev/null @@ -1,84 +0,0 @@ -{ - "MagnumClusterTemplates.list_cluster_templates": [ - { - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "context": { - "cluster_templates": { - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "docker_volume_size": 5, - "coe": "kubernetes", - "image_id": "fedora-atomic-latest", - "network_driver": "flannel" - }, - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - }, - { - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "context": { - "cluster_templates": { - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "docker_volume_size": 5, - "coe": "swarm", - "image_id": "fedora-atomic-latest", - "network_driver": "docker" - }, - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - }, - { - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "context": { - "cluster_templates": { - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "coe": "mesos", - "image_id": "ubuntu-mesos", - "network_driver": "docker" - }, - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - }, - 
"sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/magnum/list-cluster-templates.yaml b/samples/tasks/scenarios/magnum/list-cluster-templates.yaml deleted file mode 100644 index 86c7f4cda4..0000000000 --- a/samples/tasks/scenarios/magnum/list-cluster-templates.yaml +++ /dev/null @@ -1,61 +0,0 @@ ---- - MagnumClusterTemplates.list_cluster_templates: - - - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - docker_volume_size: 5 - network_driver: "flannel" - coe: "kubernetes" - sla: - failure_rate: - max: 0 - - - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - docker_volume_size: 5 - network_driver: "docker" - coe: "swarm" - sla: - failure_rate: - max: 0 - - - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "ubuntu-mesos" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - network_driver: "docker" - coe: "mesos" - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/magnum/list-clusters.json b/samples/tasks/scenarios/magnum/list-clusters.json deleted file mode 100644 index 3538dbc68d..0000000000 --- a/samples/tasks/scenarios/magnum/list-clusters.json +++ /dev/null @@ -1,93 +0,0 @@ -{ - "MagnumClusters.list_clusters": [ - { - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "context": { - "clusters": { - "node_count": 2 - }, - "cluster_templates": { - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "docker_volume_size": 5, - "coe": "kubernetes", - "image_id": "fedora-atomic-latest", - "network_driver": "flannel" - }, - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - }, - { - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "context": { - "clusters": { - "node_count": 2 - }, - "cluster_templates": { - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "docker_volume_size": 5, - "coe": "swarm", - "image_id": "fedora-atomic-latest", - "network_driver": "docker" - }, - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - }, - { - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "context": { - "clusters": { - "node_count": 2 - }, - "cluster_templates": { - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "coe": "mesos", - "image_id": "ubuntu-mesos", - "network_driver": "docker" - }, - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/magnum/list-clusters.yaml b/samples/tasks/scenarios/magnum/list-clusters.yaml deleted file mode 100644 index 6a35230fd5..0000000000 --- a/samples/tasks/scenarios/magnum/list-clusters.yaml +++ /dev/null @@ -1,67 +0,0 @@ ---- - MagnumClusters.list_clusters: - - - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - 
users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - docker_volume_size: 5 - network_driver: "flannel" - coe: "kubernetes" - clusters: - node_count: 2 - sla: - failure_rate: - max: 0 - - - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - docker_volume_size: 5 - network_driver: "docker" - coe: "swarm" - clusters: - node_count: 2 - sla: - failure_rate: - max: 0 - - - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "ubuntu-mesos" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - network_driver: "docker" - coe: "mesos" - clusters: - node_count: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/magnum/list-pods.json b/samples/tasks/scenarios/magnum/list-pods.json deleted file mode 100644 index d068ab45c1..0000000000 --- a/samples/tasks/scenarios/magnum/list-pods.json +++ /dev/null @@ -1,68 +0,0 @@ -{ - "K8sPods.list_pods": [ - { - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "context": { - "users": { - "users_per_tenant": 1, - "tenants": 1 - }, - "cluster_templates": { - "docker_volume_size": 5, - "coe": "kubernetes", - "image_id": "fedora-atomic-latest", - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "network_driver": "flannel" - }, - "clusters": { - "node_count": 2 - }, - "ca_certs": { - "directory": "" - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - }, - { - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "context": { - "users": { - "users_per_tenant": 1, - "tenants": 1 - }, - "cluster_templates": { - "docker_volume_size": 5, - "coe": "kubernetes", - "image_id": "fedora-atomic-latest", - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "network_driver": "flannel", - "tls_disabled": true - }, - "clusters": { - "node_count": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/magnum/list-pods.yaml b/samples/tasks/scenarios/magnum/list-pods.yaml deleted file mode 100644 index 9d85d5f390..0000000000 --- a/samples/tasks/scenarios/magnum/list-pods.yaml +++ /dev/null @@ -1,49 +0,0 @@ ---- - K8sPods.list_pods: - - - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - docker_volume_size: 5 - network_driver: "flannel" - coe: "kubernetes" - clusters: - node_count: 2 - ca_certs: - directory: "" - sla: - failure_rate: - max: 0 - - - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - cluster_templates: - image_id: "fedora-atomic-latest" - external_network_id: "public" - dns_nameserver: "8.8.8.8" - flavor_id: "m1.small" - docker_volume_size: 5 - network_driver: "flannel" - coe: "kubernetes" - tls_disabled: True - clusters: - node_count: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/manila/attach-security-service-to-share-network.json 
b/samples/tasks/scenarios/manila/attach-security-service-to-share-network.json deleted file mode 100644 index afde3a49c8..0000000000 --- a/samples/tasks/scenarios/manila/attach-security-service-to-share-network.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "ManilaShares.attach_security_service_to_share_network": [ - { - "args": { - "security_service_type": "active_directory" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "quotas": { - "manila": { - "share_networks": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/manila/attach-security-service-to-share-network.yaml b/samples/tasks/scenarios/manila/attach-security-service-to-share-network.yaml deleted file mode 100644 index 3f0bd07f5b..0000000000 --- a/samples/tasks/scenarios/manila/attach-security-service-to-share-network.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - ManilaShares.attach_security_service_to_share_network: - - - args: - security_service_type: "active_directory" - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - quotas: - manila: - share_networks: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/manila/create-security-service-and-delete.json b/samples/tasks/scenarios/manila/create-security-service-and-delete.json deleted file mode 100644 index 4658d51f45..0000000000 --- a/samples/tasks/scenarios/manila/create-security-service-and-delete.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "ManilaShares.create_security_service_and_delete": [ - { - "args": { - "security_service_type": "active_directory", - "dns_ip": "fake_dns_ip", - "server": "fake-server", - "domain": "fake_domain", - "user": "fake_user", - "password": "fake_password", - "name": "fake_name", - "description": "fake_description" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/manila/create-security-service-and-delete.yaml b/samples/tasks/scenarios/manila/create-security-service-and-delete.yaml deleted file mode 100644 index 562a3319e2..0000000000 --- a/samples/tasks/scenarios/manila/create-security-service-and-delete.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- - ManilaShares.create_security_service_and_delete: - - - args: - security_service_type: "active_directory" - dns_ip: "fake_dns_ip" - server: "fake-server" - domain: "fake_domain" - user: "fake_user" - password: "fake_password" - name: "fake_name" - description: "fake_description" - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/manila/create-share-allow-and-deny-access.json b/samples/tasks/scenarios/manila/create-share-allow-and-deny-access.json deleted file mode 100644 index 88f0578963..0000000000 --- a/samples/tasks/scenarios/manila/create-share-allow-and-deny-access.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "ManilaShares.create_share_then_allow_and_deny_access": [ - { - "args": { - "share_proto": "nfs", - "size": 1, - "access": "127.0.0.1", - "access_type": "ip" - }, - "runner": { - "type": "constant", - "times": 2, - "concurrency": 2 - }, - "context": { - "quotas": { - "manila": { - "shares": -1, - "gigabytes": -1 - } - }, - 
"users": { - "tenants": 2, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/manila/create-share-allow-and-deny-access.yaml b/samples/tasks/scenarios/manila/create-share-allow-and-deny-access.yaml deleted file mode 100644 index c98bac6079..0000000000 --- a/samples/tasks/scenarios/manila/create-share-allow-and-deny-access.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- - ManilaShares.create_share_then_allow_and_deny_access: - - - args: - share_proto: "nfs" - size: 1 - access: "127.0.0.1" - access_type: "ip" - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - quotas: - manila: - shares: -1 - gigabytes: -1 - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/manila/create-share-and-extend.json b/samples/tasks/scenarios/manila/create-share-and-extend.json deleted file mode 100644 index 9610295929..0000000000 --- a/samples/tasks/scenarios/manila/create-share-and-extend.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "ManilaShares.create_and_extend_share": [ - { - "args": { - "share_proto": "nfs", - "size": 1, - "new_size": 2 - }, - "runner": { - "type": "constant", - "times": 2, - "concurrency": 2 - }, - "context": { - "quotas": { - "manila": { - "shares": -1, - "gigabytes": -1 - } - }, - "users": { - "tenants": 2, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/manila/create-share-and-extend.yaml b/samples/tasks/scenarios/manila/create-share-and-extend.yaml deleted file mode 100644 index 794b0079c3..0000000000 --- a/samples/tasks/scenarios/manila/create-share-and-extend.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- - ManilaShares.create_and_extend_share: - - - args: - share_proto: "nfs" - size: 1 - new_size: 2 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - quotas: - manila: - shares: -1 - gigabytes: -1 - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/manila/create-share-and-shrink.json b/samples/tasks/scenarios/manila/create-share-and-shrink.json deleted file mode 100644 index d9a95b0b15..0000000000 --- a/samples/tasks/scenarios/manila/create-share-and-shrink.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "ManilaShares.create_and_shrink_share": [ - { - "args": { - "share_proto": "nfs", - "size": 1, - "new_size": 2 - }, - "runner": { - "type": "constant", - "times": 2, - "concurrency": 2 - }, - "context": { - "quotas": { - "manila": { - "shares": -1, - "gigabytes": -1 - } - }, - "users": { - "tenants": 2, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/manila/create-share-and-shrink.yaml b/samples/tasks/scenarios/manila/create-share-and-shrink.yaml deleted file mode 100644 index abac632c6a..0000000000 --- a/samples/tasks/scenarios/manila/create-share-and-shrink.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- - ManilaShares.create_and_shrink_share: - - - args: - share_proto: "nfs" - size: 1 - new_size: 2 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - quotas: - manila: - shares: -1 - gigabytes: -1 - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/manila/create-share-network-and-delete.json b/samples/tasks/scenarios/manila/create-share-network-and-delete.json deleted file mode 100644 index d37efe160c..0000000000 --- 
a/samples/tasks/scenarios/manila/create-share-network-and-delete.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "ManilaShares.create_share_network_and_delete": [ - { - "args": { - "name": "rally" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 10 - }, - "context": { - "quotas": { - "manila": { - "share_networks": -1 - } - }, - "users": { - "tenants": 2, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/manila/create-share-network-and-delete.yaml b/samples/tasks/scenarios/manila/create-share-network-and-delete.yaml deleted file mode 100644 index 1c35533ec8..0000000000 --- a/samples/tasks/scenarios/manila/create-share-network-and-delete.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - ManilaShares.create_share_network_and_delete: - - - args: - name: "rally" - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - quotas: - manila: - share_networks: -1 - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/manila/create-share-network-and-list.json b/samples/tasks/scenarios/manila/create-share-network-and-list.json deleted file mode 100644 index 5cf61ac3da..0000000000 --- a/samples/tasks/scenarios/manila/create-share-network-and-list.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "ManilaShares.create_share_network_and_list": [ - { - "args": { - "name": "rally", - "detailed": true, - "search_opts": { - "name": "rally" - } - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 10 - }, - "context": { - "quotas": { - "manila": { - "share_networks": -1 - } - }, - "users": { - "tenants": 2, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/manila/create-share-network-and-list.yaml b/samples/tasks/scenarios/manila/create-share-network-and-list.yaml deleted file mode 100644 index 9822ece4d6..0000000000 --- a/samples/tasks/scenarios/manila/create-share-network-and-list.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- - ManilaShares.create_share_network_and_list: - - - args: - name: "rally" - detailed: True - search_opts: - name: "rally" - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - quotas: - manila: - share_networks: -1 - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/manila/create-share-with-autocreated-share-networks-and-delete.json b/samples/tasks/scenarios/manila/create-share-with-autocreated-share-networks-and-delete.json deleted file mode 100644 index 59eff86864..0000000000 --- a/samples/tasks/scenarios/manila/create-share-with-autocreated-share-networks-and-delete.json +++ /dev/null @@ -1,66 +0,0 @@ -{% set use_security_services = use_security_services or False %} -{ - "ManilaShares.create_and_delete_share": [ - { - "args": { - "share_proto": "nfs", - "size": 1, - "min_sleep": 1, - "max_sleep": 2 - }, - "runner": { - "type": "constant", - "times": 2, - "concurrency": 2 - }, - "context": { - "quotas": { - "manila": { - "shares": -1, - "gigabytes": -1, - "share_networks": -1 - } - }, - "users": { - "tenants": 2, - "users_per_tenant": 1, - "user_choice_method": "round_robin" - }, - "network": { - "networks_per_tenant": 1, - "start_cidr": "99.0.0.0/24" - }, - "manila_share_networks": { - "use_share_networks": true - } - {% if use_security_services %} - , - "manila_security_services": { - "security_services": [ - {"security_service_type": "ldap", - "server": "LDAP 
server address", - "user": "User that will be used", - "password": "Password for specified user"}, - {"security_service_type": "kerberos", - "dns_ip": "IP address of DNS service to be used", - "server": "Kerberos server address", - "domain": "Kerberos realm", - "user": "User that will be used", - "password": "Password for specified user"}, - {"security_service_type": "active_directory", - "dns_ip": "IP address of DNS service to be used", - "domain": "Domain from 'Active Directory'", - "user": "User from 'Active Directory'", - "password": "password for specified user"} - ] - } - {% endif %} - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/manila/create-share-with-autocreated-share-networks-and-delete.yaml b/samples/tasks/scenarios/manila/create-share-with-autocreated-share-networks-and-delete.yaml deleted file mode 100644 index a5480340cd..0000000000 --- a/samples/tasks/scenarios/manila/create-share-with-autocreated-share-networks-and-delete.yaml +++ /dev/null @@ -1,51 +0,0 @@ -{% set use_security_services = use_security_services or False %} ---- - ManilaShares.create_and_delete_share: - - - args: - share_proto: "nfs" - size: 1 - min_sleep: 1 - max_sleep: 2 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - quotas: - manila: - shares: -1 - gigabytes: -1 - share_networks: -1 - users: - tenants: 2 - users_per_tenant: 1 - user_choice_method: "round_robin" - network: - networks_per_tenant: 1 - start_cidr: "99.0.0.0/24" - manila_share_networks: - use_share_networks: True - {% if use_security_services %} - manila_security_services: - security_services: [ - {"security_service_type": "ldap", - "server": "LDAP server address", - "user": "User that will be used", - "password": "Password for specified user"}, - {"security_service_type": "kerberos", - "dns_ip": "IP address of DNS service to be used", - "server": "Kerberos server address", - "domain": "Kerberos realm", - "user": "User that will be used", - "password": "Password for specified user"}, - {"security_service_type": "active_directory", - "dns_ip": "IP address of DNS service to be used", - "domain": "Domain from 'Active Directory'", - "user": "User from 'Active Directory'", - "password": "password for specified user"}, - ] - {% endif %} - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/manila/create-share-with-autocreated-share-networks-and-list.json b/samples/tasks/scenarios/manila/create-share-with-autocreated-share-networks-and-list.json deleted file mode 100644 index 9ea7f1a371..0000000000 --- a/samples/tasks/scenarios/manila/create-share-with-autocreated-share-networks-and-list.json +++ /dev/null @@ -1,66 +0,0 @@ -{% set use_security_services = use_security_services or False %} -{ - "ManilaShares.create_and_list_share": [ - { - "args": { - "share_proto": "nfs", - "size": 1, - "min_sleep": 1, - "max_sleep": 2 - }, - "runner": { - "type": "constant", - "times": 2, - "concurrency": 2 - }, - "context": { - "quotas": { - "manila": { - "shares": -1, - "gigabytes": -1, - "share_networks": -1 - } - }, - "users": { - "tenants": 2, - "users_per_tenant": 1, - "user_choice_method": "round_robin" - }, - "network": { - "networks_per_tenant": 1, - "start_cidr": "99.0.0.0/24" - }, - "manila_share_networks": { - "use_share_networks": true - } - {% if use_security_services %} - , - "manila_security_services": { - "security_services": [ - {"security_service_type": "ldap", - "server": "LDAP server address", - "user": "User that will be used", - "password": "Password 
for specified user"}, - {"security_service_type": "kerberos", - "dns_ip": "IP address of DNS service to be used", - "server": "Kerberos server address", - "domain": "Kerberos realm", - "user": "User that will be used", - "password": "Password for specified user"}, - {"security_service_type": "active_directory", - "dns_ip": "IP address of DNS service to be used", - "domain": "Domain from 'Active Directory'", - "user": "User from 'Active Directory'", - "password": "password for specified user"} - ] - } - {% endif %} - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/manila/create-share-with-autocreated-share-networks-and-list.yaml b/samples/tasks/scenarios/manila/create-share-with-autocreated-share-networks-and-list.yaml deleted file mode 100644 index e87237ed81..0000000000 --- a/samples/tasks/scenarios/manila/create-share-with-autocreated-share-networks-and-list.yaml +++ /dev/null @@ -1,51 +0,0 @@ -{% set use_security_services = use_security_services or False %} ---- - ManilaShares.create_and_list_share: - - - args: - share_proto: "nfs" - size: 1 - min_sleep: 1 - max_sleep: 2 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - quotas: - manila: - shares: -1 - gigabytes: -1 - share_networks: -1 - users: - tenants: 2 - users_per_tenant: 1 - user_choice_method: "round_robin" - network: - networks_per_tenant: 1 - start_cidr: "99.0.0.0/24" - manila_share_networks: - use_share_networks: True - {% if use_security_services %} - manila_security_services: - security_services: [ - {"security_service_type": "ldap", - "server": "LDAP server address", - "user": "User that will be used", - "password": "Password for specified user"}, - {"security_service_type": "kerberos", - "dns_ip": "IP address of DNS service to be used", - "server": "Kerberos server address", - "domain": "Kerberos realm", - "user": "User that will be used", - "password": "Password for specified user"}, - {"security_service_type": "active_directory", - "dns_ip": "IP address of DNS service to be used", - "domain": "Domain from 'Active Directory'", - "user": "User from 'Active Directory'", - "password": "password for specified user"}, - ] - {% endif %} - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/manila/create-share-with-autocreated-share-networks-and-set-metadata.json b/samples/tasks/scenarios/manila/create-share-with-autocreated-share-networks-and-set-metadata.json deleted file mode 100644 index ae9840e59d..0000000000 --- a/samples/tasks/scenarios/manila/create-share-with-autocreated-share-networks-and-set-metadata.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "ManilaShares.set_and_delete_metadata": [ - { - "args": { - "sets": 1, - "set_size": 3, - "delete_size": 3, - "key_min_length": 1, - "key_max_length": 256, - "value_min_length": 1, - "value_max_length": 1024 - }, - "runner": { - "type": "constant", - "times": 1, - "concurrency": 1 - }, - "context": { - "quotas": { - "manila": { - "shares": -1, - "gigabytes": -1, - "share_networks": -1 - } - }, - "users": { - "tenants": 1, - "users_per_tenant": 1, - "user_choice_method": "round_robin" - }, - "network": { - "networks_per_tenant": 1, - "start_cidr": "99.0.0.0/24" - }, - "manila_share_networks": { - "use_share_networks": true - }, - "manila_shares": { - "shares_per_tenant": 1, - "share_proto": "NFS", - "size": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/manila/create-share-with-autocreated-share-networks-and-set-metadata.yaml 
b/samples/tasks/scenarios/manila/create-share-with-autocreated-share-networks-and-set-metadata.yaml deleted file mode 100644 index b632230bf2..0000000000 --- a/samples/tasks/scenarios/manila/create-share-with-autocreated-share-networks-and-set-metadata.yaml +++ /dev/null @@ -1,37 +0,0 @@ ---- - ManilaShares.set_and_delete_metadata: - - - args: - sets: 1 - set_size: 3 - delete_size: 3 - key_min_length: 1 - key_max_length: 256 - value_min_length: 1 - value_max_length: 1024 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - quotas: - manila: - shares: -1 - gigabytes: -1 - share_networks: -1 - users: - tenants: 1 - users_per_tenant: 1 - user_choice_method: "round_robin" - network: - networks_per_tenant: 1 - start_cidr: "99.0.0.0/24" - manila_share_networks: - use_share_networks: True - manila_shares: - shares_per_tenant: 1 - share_proto: "NFS" - size: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/manila/create-share-without-share-networks-and-delete.json b/samples/tasks/scenarios/manila/create-share-without-share-networks-and-delete.json deleted file mode 100644 index 4bfb2dd6b5..0000000000 --- a/samples/tasks/scenarios/manila/create-share-without-share-networks-and-delete.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "ManilaShares.create_and_delete_share": [ - { - "args": { - "share_proto": "nfs", - "size": 1, - "min_sleep": 1, - "max_sleep": 2 - }, - "runner": { - "type": "constant", - "times": 2, - "concurrency": 2 - }, - "context": { - "quotas": { - "manila": { - "shares": -1, - "gigabytes": -1 - } - }, - "users": { - "tenants": 2, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/manila/create-share-without-share-networks-and-delete.yaml b/samples/tasks/scenarios/manila/create-share-without-share-networks-and-delete.yaml deleted file mode 100644 index dac101328f..0000000000 --- a/samples/tasks/scenarios/manila/create-share-without-share-networks-and-delete.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- - ManilaShares.create_and_delete_share: - - - args: - share_proto: "nfs" - size: 1 - min_sleep: 1 - max_sleep: 2 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - quotas: - manila: - shares: -1 - gigabytes: -1 - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/manila/create-share-without-share-networks-and-list.json b/samples/tasks/scenarios/manila/create-share-without-share-networks-and-list.json deleted file mode 100644 index 18df4deeb4..0000000000 --- a/samples/tasks/scenarios/manila/create-share-without-share-networks-and-list.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "ManilaShares.create_and_list_share": [ - { - "args": { - "share_proto": "nfs", - "size": 1, - "min_sleep": 1, - "max_sleep": 2 - }, - "runner": { - "type": "constant", - "times": 2, - "concurrency": 2 - }, - "context": { - "quotas": { - "manila": { - "shares": -1, - "gigabytes": -1 - } - }, - "users": { - "tenants": 2, - "users_per_tenant": 1, - "user_choice_method": "round_robin" - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/manila/create-share-without-share-networks-and-list.yaml b/samples/tasks/scenarios/manila/create-share-without-share-networks-and-list.yaml deleted file mode 100644 index 11abe54999..0000000000 --- a/samples/tasks/scenarios/manila/create-share-without-share-networks-and-list.yaml +++ /dev/null @@ -1,24 +0,0 @@ ---- - 
ManilaShares.create_and_list_share: - - - args: - share_proto: "nfs" - size: 1 - min_sleep: 1 - max_sleep: 2 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - quotas: - manila: - shares: -1 - gigabytes: -1 - users: - tenants: 2 - users_per_tenant: 1 - user_choice_method: "round_robin" - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/manila/create-share-without-share-networks-and-set-metadata.json b/samples/tasks/scenarios/manila/create-share-without-share-networks-and-set-metadata.json deleted file mode 100644 index c80cb6192c..0000000000 --- a/samples/tasks/scenarios/manila/create-share-without-share-networks-and-set-metadata.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "ManilaShares.set_and_delete_metadata": [ - { - "args": { - "sets": 1, - "set_size": 3, - "delete_size": 3, - "key_min_length": 1, - "key_max_length": 256, - "value_min_length": 1, - "value_max_length": 1024 - }, - "runner": { - "type": "constant", - "times": 1, - "concurrency": 1 - }, - "context": { - "quotas": { - "manila": { - "shares": -1, - "gigabytes": -1, - "share_networks": -1 - } - }, - "users": { - "tenants": 1, - "users_per_tenant": 1, - "user_choice_method": "round_robin" - }, - "manila_shares": { - "shares_per_tenant": 1, - "share_proto": "NFS", - "size": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/manila/create-share-without-share-networks-and-set-metadata.yaml b/samples/tasks/scenarios/manila/create-share-without-share-networks-and-set-metadata.yaml deleted file mode 100644 index 254ee30d95..0000000000 --- a/samples/tasks/scenarios/manila/create-share-without-share-networks-and-set-metadata.yaml +++ /dev/null @@ -1,32 +0,0 @@ ---- - ManilaShares.set_and_delete_metadata: - - - args: - sets: 1 - set_size: 3 - delete_size: 3 - key_min_length: 1 - key_max_length: 256 - value_min_length: 1 - value_max_length: 1024 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - quotas: - manila: - shares: -1 - gigabytes: -1 - share_networks: -1 - users: - tenants: 1 - users_per_tenant: 1 - user_choice_method: "round_robin" - manila_shares: - shares_per_tenant: 1 - share_proto: "NFS" - size: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/manila/list-share-servers.json b/samples/tasks/scenarios/manila/list-share-servers.json deleted file mode 100644 index 628b5b2369..0000000000 --- a/samples/tasks/scenarios/manila/list-share-servers.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "ManilaShares.list_share_servers": [ - { - "args": { - "search_opts": {} - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 10 - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/manila/list-share-servers.yaml b/samples/tasks/scenarios/manila/list-share-servers.yaml deleted file mode 100644 index f2993cea61..0000000000 --- a/samples/tasks/scenarios/manila/list-share-servers.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- - ManilaShares.list_share_servers: - - - args: - search_opts: {} - runner: - type: "constant" - times: 10 - concurrency: 10 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/manila/list-shares.json b/samples/tasks/scenarios/manila/list-shares.json deleted file mode 100644 index 94871b4691..0000000000 --- a/samples/tasks/scenarios/manila/list-shares.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "ManilaShares.list_shares": [ - { - "args": { - "detailed": true - }, - "runner": { - "type": "constant", - "times": 12, - 
"concurrency": 1 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 4, - "user_choice_method": "round_robin" - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/manila/list-shares.yaml b/samples/tasks/scenarios/manila/list-shares.yaml deleted file mode 100644 index 0805901c5b..0000000000 --- a/samples/tasks/scenarios/manila/list-shares.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - ManilaShares.list_shares: - - - args: - detailed: True - runner: - type: "constant" - times: 12 - concurrency: 1 - context: - users: - tenants: 3 - users_per_tenant: 4 - user_choice_method: "round_robin" - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/mistral/create-delete-execution-with-workflow-name.json b/samples/tasks/scenarios/mistral/create-delete-execution-with-workflow-name.json deleted file mode 100644 index e14fcb6d50..0000000000 --- a/samples/tasks/scenarios/mistral/create-delete-execution-with-workflow-name.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "MistralExecutions.create_execution_from_workbook": [ - { - "args": { - "definition": "rally-jobs/extra/mistral_wb.yaml", - "workflow_name": "wf1", - "do_delete": true - }, - "runner": { - "type": "constant", - "times": 20, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/mistral/create-delete-execution-with-workflow-name.yaml b/samples/tasks/scenarios/mistral/create-delete-execution-with-workflow-name.yaml deleted file mode 100644 index 8d2dc426ed..0000000000 --- a/samples/tasks/scenarios/mistral/create-delete-execution-with-workflow-name.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - MistralExecutions.create_execution_from_workbook: - - - args: - definition: rally-jobs/extra/mistral_wb.yaml - workflow_name: wf1 - do_delete: true - runner: - type: "constant" - times: 20 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - diff --git a/samples/tasks/scenarios/mistral/create-delete-execution.json b/samples/tasks/scenarios/mistral/create-delete-execution.json deleted file mode 100644 index c715b68027..0000000000 --- a/samples/tasks/scenarios/mistral/create-delete-execution.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "MistralExecutions.create_execution_from_workbook": [ - { - "args": { - "definition": "rally-jobs/extra/mistral_wb.yaml", - "do_delete": true - }, - "runner": { - "type": "constant", - "times": 20, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/mistral/create-delete-execution.yaml b/samples/tasks/scenarios/mistral/create-delete-execution.yaml deleted file mode 100644 index 5c8e20d25c..0000000000 --- a/samples/tasks/scenarios/mistral/create-delete-execution.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- - MistralExecutions.create_execution_from_workbook: - - - args: - definition: rally-jobs/extra/mistral_wb.yaml - do_delete: true - runner: - type: "constant" - times: 20 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - diff --git a/samples/tasks/scenarios/mistral/create-delete-workbook.json b/samples/tasks/scenarios/mistral/create-delete-workbook.json deleted file mode 100644 index 9e360c3788..0000000000 --- 
a/samples/tasks/scenarios/mistral/create-delete-workbook.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "MistralWorkbooks.create_workbook": [ - { - "args": { - "definition": "rally-jobs/extra/mistral_wb.yaml", - "do_delete": true - }, - "runner": { - "type": "constant", - "times": 50, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/mistral/create-delete-workbook.yaml b/samples/tasks/scenarios/mistral/create-delete-workbook.yaml deleted file mode 100644 index 365751ee12..0000000000 --- a/samples/tasks/scenarios/mistral/create-delete-workbook.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- - MistralWorkbooks.create_workbook: - - - args: - definition: rally-jobs/extra/mistral_wb.yaml - do_delete: true - runner: - type: "constant" - times: 50 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/mistral/create-execution-with-inputs.json b/samples/tasks/scenarios/mistral/create-execution-with-inputs.json deleted file mode 100644 index ab3418607a..0000000000 --- a/samples/tasks/scenarios/mistral/create-execution-with-inputs.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "MistralExecutions.create_execution_from_workbook": [ - { - "args": { - "definition": "rally-jobs/extra/mistral_wb.yaml", - "wf_input": "rally-jobs/extra/mistral_input.json" - }, - "runner": { - "type": "constant", - "times": 20, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/mistral/create-execution-with-inputs.yaml b/samples/tasks/scenarios/mistral/create-execution-with-inputs.yaml deleted file mode 100644 index bc97406b5b..0000000000 --- a/samples/tasks/scenarios/mistral/create-execution-with-inputs.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- - MistralExecutions.create_execution_from_workbook: - - - args: - definition: rally-jobs/extra/mistral_wb.yaml - wf_input: rally-jobs/extra/mistral_input.json - runner: - type: "constant" - times: 20 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - diff --git a/samples/tasks/scenarios/mistral/create-execution-with-params.json b/samples/tasks/scenarios/mistral/create-execution-with-params.json deleted file mode 100644 index 07c59a9dd8..0000000000 --- a/samples/tasks/scenarios/mistral/create-execution-with-params.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "MistralExecutions.create_execution_from_workbook": [ - { - "args": { - "definition": "rally-jobs/extra/mistral_wb.yaml", - "params": "rally-jobs/extra/mistral_params.json" - }, - "runner": { - "type": "constant", - "times": 20, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/mistral/create-execution-with-params.yaml b/samples/tasks/scenarios/mistral/create-execution-with-params.yaml deleted file mode 100644 index 26bb3d1ef5..0000000000 --- a/samples/tasks/scenarios/mistral/create-execution-with-params.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- - MistralExecutions.create_execution_from_workbook: - - - args: - definition: rally-jobs/extra/mistral_wb.yaml - params: rally-jobs/extra/mistral_params.json - runner: - type: "constant" - times: 20 - concurrency: 5 - context: - 
users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - diff --git a/samples/tasks/scenarios/mistral/create-execution-with-workflow-name.json b/samples/tasks/scenarios/mistral/create-execution-with-workflow-name.json deleted file mode 100644 index f871443886..0000000000 --- a/samples/tasks/scenarios/mistral/create-execution-with-workflow-name.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "MistralExecutions.create_execution_from_workbook": [ - { - "args": { - "definition": "rally-jobs/extra/mistral_wb.yaml", - "workflow_name": "wf1" - }, - "runner": { - "type": "constant", - "times": 20, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/mistral/create-execution-with-workflow-name.yaml b/samples/tasks/scenarios/mistral/create-execution-with-workflow-name.yaml deleted file mode 100644 index ec24be619e..0000000000 --- a/samples/tasks/scenarios/mistral/create-execution-with-workflow-name.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- - MistralExecutions.create_execution_from_workbook: - - - args: - definition: rally-jobs/extra/mistral_wb.yaml - workflow_name: wf1 - runner: - type: "constant" - times: 20 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - diff --git a/samples/tasks/scenarios/mistral/create-execution.json b/samples/tasks/scenarios/mistral/create-execution.json deleted file mode 100644 index 4d3e5de4b4..0000000000 --- a/samples/tasks/scenarios/mistral/create-execution.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "MistralExecutions.create_execution_from_workbook": [ - { - "args": { - "definition": "rally-jobs/extra/mistral_wb.yaml" - }, - "runner": { - "type": "constant", - "times": 20, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/mistral/create-execution.yaml b/samples/tasks/scenarios/mistral/create-execution.yaml deleted file mode 100644 index 2d8c01a7ca..0000000000 --- a/samples/tasks/scenarios/mistral/create-execution.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - MistralExecutions.create_execution_from_workbook: - - - args: - definition: rally-jobs/extra/mistral_wb.yaml - runner: - type: "constant" - times: 20 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/mistral/create-workbook.json b/samples/tasks/scenarios/mistral/create-workbook.json deleted file mode 100644 index f95f0273fb..0000000000 --- a/samples/tasks/scenarios/mistral/create-workbook.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "MistralWorkbooks.create_workbook": [ - { - "args": { - "definition": "rally-jobs/extra/mistral_wb.yaml" - }, - "runner": { - "type": "constant", - "times": 50, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/mistral/create-workbook.yaml b/samples/tasks/scenarios/mistral/create-workbook.yaml deleted file mode 100644 index 1b3940fd4b..0000000000 --- a/samples/tasks/scenarios/mistral/create-workbook.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - MistralWorkbooks.create_workbook: - - - args: - definition: rally-jobs/extra/mistral_wb.yaml - runner: - type: "constant" - times: 50 - concurrency: 10 - context: - 
users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/mistral/list-executions.json b/samples/tasks/scenarios/mistral/list-executions.json deleted file mode 100644 index be342ced1c..0000000000 --- a/samples/tasks/scenarios/mistral/list-executions.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "MistralExecutions.list_executions": [ - { - "runner": { - "type": "constant", - "times": 50, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/mistral/list-executions.yaml b/samples/tasks/scenarios/mistral/list-executions.yaml deleted file mode 100644 index 5eae18247d..0000000000 --- a/samples/tasks/scenarios/mistral/list-executions.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - MistralExecutions.list_executions: - - - runner: - type: "constant" - times: 50 - concurrency: 10 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/mistral/list-workbooks.json b/samples/tasks/scenarios/mistral/list-workbooks.json deleted file mode 100644 index 5070c58937..0000000000 --- a/samples/tasks/scenarios/mistral/list-workbooks.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "MistralWorkbooks.list_workbooks": [ - { - "runner": { - "type": "constant", - "times": 50, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/mistral/list-workbooks.yaml b/samples/tasks/scenarios/mistral/list-workbooks.yaml deleted file mode 100644 index a176536867..0000000000 --- a/samples/tasks/scenarios/mistral/list-workbooks.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - MistralWorkbooks.list_workbooks: - - - runner: - type: "constant" - times: 50 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/monasca/list-metrics.json b/samples/tasks/scenarios/monasca/list-metrics.json deleted file mode 100644 index 6ccf10345a..0000000000 --- a/samples/tasks/scenarios/monasca/list-metrics.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - "MonascaMetrics.list_metrics": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "roles": [ - "monasca-user" - ], - "monasca_metrics": { - "dimensions": { - "region": "RegionOne", - "service": "identity", - "hostname": "fake_host", - "url": "http://fake_host:5000/v2.0" - }, - "metrics_per_tenant": 10 - } - }, - "args": { - "region": "RegionOne", - "limit": 5 - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/monasca/list-metrics.yaml b/samples/tasks/scenarios/monasca/list-metrics.yaml deleted file mode 100644 index fa134e969a..0000000000 --- a/samples/tasks/scenarios/monasca/list-metrics.yaml +++ /dev/null @@ -1,26 +0,0 @@ ---- - MonascaMetrics.list_metrics: - - - runner: - type: "constant" - times: 10 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - roles: - - "monasca-user" - monasca_metrics: - "dimensions": - "region": "RegionOne" - "service": "identity" - "hostname": "fake_host" - "url": "http://fake_host:5000/v2.0" - "metrics_per_tenant": 10 - args: - "region": "RegionOne" - "limit": 5 - sla: - failure_rate: - max: 0 diff --git 
a/samples/tasks/scenarios/murano/create-and-delete-environment.json b/samples/tasks/scenarios/murano/create-and-delete-environment.json deleted file mode 100644 index 60db1cbddf..0000000000 --- a/samples/tasks/scenarios/murano/create-and-delete-environment.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "MuranoEnvironments.create_and_delete_environment": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/murano/create-and-delete-environment.yaml b/samples/tasks/scenarios/murano/create-and-delete-environment.yaml deleted file mode 100644 index cae07c743a..0000000000 --- a/samples/tasks/scenarios/murano/create-and-delete-environment.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - MuranoEnvironments.create_and_delete_environment: - - - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/murano/create-and-deploy-environment.json b/samples/tasks/scenarios/murano/create-and-deploy-environment.json deleted file mode 100644 index 21b4ca25b2..0000000000 --- a/samples/tasks/scenarios/murano/create-and-deploy-environment.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "MuranoEnvironments.create_and_deploy_environment": [ - { - "args": { - "packages_per_env": 2 - }, - "runner": { - "type": "constant", - "times": 2, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "murano_packages": { - "app_package": "rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter.zip" - }, - "roles": ["admin"] - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - }, - { - "args": { - "packages_per_env": 2 - }, - "runner": { - "type": "constant", - "times": 2, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "murano_packages": { - "app_package": "rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/" - }, - "roles": ["admin"] - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/murano/create-and-deploy-environment.yaml b/samples/tasks/scenarios/murano/create-and-deploy-environment.yaml deleted file mode 100644 index 8317b13cad..0000000000 --- a/samples/tasks/scenarios/murano/create-and-deploy-environment.yaml +++ /dev/null @@ -1,38 +0,0 @@ ---- - MuranoEnvironments.create_and_deploy_environment: - - - args: - packages_per_env: 2 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - murano_packages: - app_package: "rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter.zip" - roles: - - "admin" - sla: - failure_rate: - max: 0 - - - args: - packages_per_env: 2 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - murano_packages: - app_package: "rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/" - roles: - - "admin" - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/murano/import-and-delete-package.json b/samples/tasks/scenarios/murano/import-and-delete-package.json deleted file mode 100644 index 1aa65c57c7..0000000000 --- a/samples/tasks/scenarios/murano/import-and-delete-package.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - 
"MuranoPackages.import_and_delete_package": [ - { - "args": { - "package": "rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/" - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/murano/import-and-delete-package.yaml b/samples/tasks/scenarios/murano/import-and-delete-package.yaml deleted file mode 100644 index f640430cec..0000000000 --- a/samples/tasks/scenarios/murano/import-and-delete-package.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - MuranoPackages.import_and_delete_package: - - - args: - package: "rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/" - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/murano/import-and-filter-applications.json b/samples/tasks/scenarios/murano/import-and-filter-applications.json deleted file mode 100644 index 86ba002c25..0000000000 --- a/samples/tasks/scenarios/murano/import-and-filter-applications.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "MuranoPackages.import_and_filter_applications": [ - { - "args": { - "package": "rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/", - "filter_query": {"category" : "Web"} - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/murano/import-and-filter-applications.yaml b/samples/tasks/scenarios/murano/import-and-filter-applications.yaml deleted file mode 100644 index e512a7ff26..0000000000 --- a/samples/tasks/scenarios/murano/import-and-filter-applications.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - MuranoPackages.import_and_filter_applications: - - - args: - package: "rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/" - filter_query: {"category" : "Web"} - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/murano/import-and-list-packages.json b/samples/tasks/scenarios/murano/import-and-list-packages.json deleted file mode 100644 index 547e26de19..0000000000 --- a/samples/tasks/scenarios/murano/import-and-list-packages.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "MuranoPackages.import_and_list_packages": [ - { - "args": { - "package": "rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/" - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/murano/import-and-list-packages.yaml b/samples/tasks/scenarios/murano/import-and-list-packages.yaml deleted file mode 100644 index 94bd5ab5c1..0000000000 --- a/samples/tasks/scenarios/murano/import-and-list-packages.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - MuranoPackages.import_and_list_packages: - - - args: - package: "rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/" - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - 
users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/murano/list-environments.json b/samples/tasks/scenarios/murano/list-environments.json deleted file mode 100644 index 5c5f1d0c13..0000000000 --- a/samples/tasks/scenarios/murano/list-environments.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "MuranoEnvironments.list_environments": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "murano_environments": { - "environments_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/murano/list-environments.yaml b/samples/tasks/scenarios/murano/list-environments.yaml deleted file mode 100644 index e7de2257c5..0000000000 --- a/samples/tasks/scenarios/murano/list-environments.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - MuranoEnvironments.list_environments: - - - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - murano_environments: - environments_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/murano/package-lifecycle.json b/samples/tasks/scenarios/murano/package-lifecycle.json deleted file mode 100644 index 3e21c49d25..0000000000 --- a/samples/tasks/scenarios/murano/package-lifecycle.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "MuranoPackages.package_lifecycle": [ - { - "args": { - "package": "rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/", - "body": {"categories": ["Web"]}, - "operation": "add" - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/murano/package-lifecycle.yaml b/samples/tasks/scenarios/murano/package-lifecycle.yaml deleted file mode 100644 index 58ed5fcbef..0000000000 --- a/samples/tasks/scenarios/murano/package-lifecycle.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- - MuranoPackages.package_lifecycle: - - - args: - package: "rally-jobs/extra/murano/applications/HelloReporter/io.murano.apps.HelloReporter/" - body: {"categories": ["Web"]} - operation: "add" - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-assoc-disassoc-networks-bgpvpns.json b/samples/tasks/scenarios/neutron/create-and-assoc-disassoc-networks-bgpvpns.json deleted file mode 100644 index 9953eb0f88..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-assoc-disassoc-networks-bgpvpns.json +++ /dev/null @@ -1,35 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NeutronBGPVPN.create_bgpvpn_assoc_disassoc_networks": [ - { - "args":{}, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 1 - }, - "network": {}, - "servers": { - "servers_per_tenant": 1, - "auto_assign_nic": true, - "flavor" : { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-assoc-disassoc-networks-bgpvpns.yaml b/samples/tasks/scenarios/neutron/create-and-assoc-disassoc-networks-bgpvpns.yaml deleted file mode 100644 index 
24ca50e81f..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-assoc-disassoc-networks-bgpvpns.yaml +++ /dev/null @@ -1,24 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NeutronBGPVPN.create_bgpvpn_assoc_disassoc_networks: - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 - network: {} - servers: - servers_per_tenant: 1 - auto_assign_nic: True - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-assoc-disassoc-routers-bgpvpns.json b/samples/tasks/scenarios/neutron/create-and-assoc-disassoc-routers-bgpvpns.json deleted file mode 100644 index cd5c35d9fd..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-assoc-disassoc-routers-bgpvpns.json +++ /dev/null @@ -1,35 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NeutronBGPVPN.create_bgpvpn_assoc_disassoc_routers": [ - { - "args":{}, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 1 - }, - "network": {}, - "servers": { - "servers_per_tenant": 1, - "auto_assign_nic": true, - "flavor" : { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-assoc-disassoc-routers-bgpvpns.yaml b/samples/tasks/scenarios/neutron/create-and-assoc-disassoc-routers-bgpvpns.yaml deleted file mode 100644 index b979c0051a..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-assoc-disassoc-routers-bgpvpns.yaml +++ /dev/null @@ -1,24 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NeutronBGPVPN.create_bgpvpn_assoc_disassoc_routers: - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 - network: {} - servers: - servers_per_tenant: 1 - auto_assign_nic: True - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-delete-bgpvpns.json b/samples/tasks/scenarios/neutron/create-and-delete-bgpvpns.json deleted file mode 100644 index e46fa95b1e..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-bgpvpns.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "NeutronBGPVPN.create_and_delete_bgpvpns": [ - { - "args":{}, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-delete-bgpvpns.yaml b/samples/tasks/scenarios/neutron/create-and-delete-bgpvpns.yaml deleted file mode 100644 index 174afe0f68..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-bgpvpns.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- - NeutronBGPVPN.create_and_delete_bgpvpns: - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-delete-floating-ips.json b/samples/tasks/scenarios/neutron/create-and-delete-floating-ips.json deleted file mode 100644 index 46ef74cd93..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-floating-ips.json +++ /dev/null @@ 
-1,31 +0,0 @@ -{ - "NeutronNetworks.create_and_delete_floating_ips": [ - { - "args": { - "floating_network": "public", - "floating_ip_args": {} - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "floatingip": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-delete-floating-ips.yaml b/samples/tasks/scenarios/neutron/create-and-delete-floating-ips.yaml deleted file mode 100644 index e818456f3d..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-floating-ips.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - NeutronNetworks.create_and_delete_floating_ips: - - - args: - floating_network: "public" - floating_ip_args: {} - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 3 - quotas: - neutron: - floatingip: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-delete-healthmonitors.json b/samples/tasks/scenarios/neutron/create-and-delete-healthmonitors.json deleted file mode 100644 index f9a3b2d5e9..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-healthmonitors.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "NeutronLoadbalancerV1.create_and_delete_healthmonitors": [ - { - "args": { - "healthmonitor_create_args": {} - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 5, - "users_per_tenant": 2 - }, - "quotas": { - "neutron": { - "health_monitor": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-delete-healthmonitors.yaml b/samples/tasks/scenarios/neutron/create-and-delete-healthmonitors.yaml deleted file mode 100644 index 1efb2ad276..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-healthmonitors.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - NeutronLoadbalancerV1.create_and_delete_healthmonitors: - - - args: - healthmonitor_create_args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 5 - users_per_tenant: 2 - quotas: - neutron: - health_monitor: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-delete-networks.json b/samples/tasks/scenarios/neutron/create-and-delete-networks.json deleted file mode 100644 index 9189b34719..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-networks.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "NeutronNetworks.create_and_delete_networks": [ - { - "args": { - "network_create_args": {} - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "network": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-delete-networks.yaml b/samples/tasks/scenarios/neutron/create-and-delete-networks.yaml deleted file mode 100644 index 7ba6b2a4f9..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-networks.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - NeutronNetworks.create_and_delete_networks: - - - args: - network_create_args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 3 - users_per_tenant: 3 - quotas: - neutron: - 
network: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-delete-pools.json b/samples/tasks/scenarios/neutron/create-and-delete-pools.json deleted file mode 100644 index 7176974ef6..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-pools.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "NeutronLoadbalancerV1.create_and_delete_pools": [ - { - "args": { - "pool_create_args":{} - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 3 - }, - "network":{}, - "quotas": { - "neutron": { - "network": -1, - "subnet": -1, - "pool": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-delete-pools.yaml b/samples/tasks/scenarios/neutron/create-and-delete-pools.yaml deleted file mode 100644 index 1df6f5beb2..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-pools.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- - NeutronLoadbalancerV1.create_and_delete_pools: - - - args: - pool_create_args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 3 - users_per_tenant: 3 - network: {} - quotas: - neutron: - network: -1 - subnet: -1 - pool: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-delete-ports.json b/samples/tasks/scenarios/neutron/create-and-delete-ports.json deleted file mode 100644 index f186204bc8..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-ports.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "NeutronNetworks.create_and_delete_ports": [ - { - "args": { - "network_create_args": {}, - "port_create_args": {}, - "ports_per_network": 10 - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "network": {}, - "users": { - "tenants": 3, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "network": -1, - "port": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-delete-ports.yaml b/samples/tasks/scenarios/neutron/create-and-delete-ports.yaml deleted file mode 100644 index 6d7a3cc79b..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-ports.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- - NeutronNetworks.create_and_delete_ports: - - - args: - network_create_args: {} - port_create_args: {} - ports_per_network: 10 - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - network: {} - users: - tenants: 3 - users_per_tenant: 3 - quotas: - neutron: - network: -1 - port: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-delete-routers.json b/samples/tasks/scenarios/neutron/create-and-delete-routers.json deleted file mode 100644 index d076a2329b..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-routers.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "NeutronNetworks.create_and_delete_routers": [ - { - "args": { - "network_create_args": {}, - "subnet_create_args": {}, - "subnet_cidr_start": "1.1.0.0/30", - "subnets_per_network": 2, - "router_create_args": {} - }, - "runner": { - "type": "constant", - "times": 30, - "concurrency": 10 - }, - "context": { - "network": {}, - "users": { - "tenants": 3, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "network": -1, - "subnet": -1, - "router": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} 
diff --git a/samples/tasks/scenarios/neutron/create-and-delete-routers.yaml b/samples/tasks/scenarios/neutron/create-and-delete-routers.yaml deleted file mode 100644 index 07b6198c9d..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-routers.yaml +++ /dev/null @@ -1,26 +0,0 @@ ---- - NeutronNetworks.create_and_delete_routers: - - - args: - network_create_args: {} - subnet_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - subnets_per_network: 2 - router_create_args: {} - runner: - type: "constant" - times: 30 - concurrency: 10 - context: - network: {} - users: - tenants: 3 - users_per_tenant: 3 - quotas: - neutron: - network: -1 - subnet: -1 - router: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-delete-security-group-rule.json b/samples/tasks/scenarios/neutron/create-and-delete-security-group-rule.json deleted file mode 100644 index 0c35a7ef60..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-security-group-rule.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "NeutronSecurityGroup.create_and_delete_security_group_rule": [ - { - "args": { - "security_group_args": {}, - "security_group_rule_args":{} - }, - "runner": { - "type": "constant", - "times": 20, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "security_group": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-delete-security-group-rule.yaml b/samples/tasks/scenarios/neutron/create-and-delete-security-group-rule.yaml deleted file mode 100644 index b87add0e47..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-security-group-rule.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - NeutronSecurityGroup.create_and_delete_security_group_rule: - - - args: - security_group_args: {} - security_group_rule_args: {} - runner: - type: "constant" - times: 20 - concurrency: 10 - context: - users: - tenants: 3 - users_per_tenant: 3 - quotas: - neutron: - security_group: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-delete-security-groups.json b/samples/tasks/scenarios/neutron/create-and-delete-security-groups.json deleted file mode 100644 index 3168524ef6..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-security-groups.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "NeutronSecurityGroup.create_and_delete_security_groups": [ - { - "args": { - "security_group_create_args": {} - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "security_group": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-delete-security-groups.yaml b/samples/tasks/scenarios/neutron/create-and-delete-security-groups.yaml deleted file mode 100644 index 2acd14c1eb..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-security-groups.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - NeutronSecurityGroup.create_and_delete_security_groups: - - - args: - security_group_create_args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 3 - users_per_tenant: 3 - quotas: - neutron: - security_group: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-delete-subnets.json 
b/samples/tasks/scenarios/neutron/create-and-delete-subnets.json deleted file mode 100644 index da9cb08cc9..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-subnets.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "NeutronNetworks.create_and_delete_subnets": [ - { - "args": { - "network_create_args": {}, - "subnet_create_args": {}, - "subnet_cidr_start": "1.1.0.0/30", - "subnets_per_network": 2 - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "network": {}, - "users": { - "tenants": 3, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "network": -1, - "subnet": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-delete-subnets.yaml b/samples/tasks/scenarios/neutron/create-and-delete-subnets.yaml deleted file mode 100644 index 754b4ac283..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-subnets.yaml +++ /dev/null @@ -1,24 +0,0 @@ ---- - NeutronNetworks.create_and_delete_subnets: - - - args: - network_create_args: {} - subnet_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - subnets_per_network: 2 - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - network: {} - users: - tenants: 3 - users_per_tenant: 3 - quotas: - neutron: - network: -1 - subnet: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-delete-vips.json b/samples/tasks/scenarios/neutron/create-and-delete-vips.json deleted file mode 100644 index eca2d4675c..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-vips.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "NeutronLoadbalancerV1.create_and_delete_vips": [ - { - "args": { - "vip_create_args":{} - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 5, - "users_per_tenant": 1 - }, - "network": {}, - "lbaas": { - "pool": {} - }, - "quotas": { - "neutron": { - "network": -1, - "subnet": -1, - "pool": -1, - "vip": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-delete-vips.yaml b/samples/tasks/scenarios/neutron/create-and-delete-vips.yaml deleted file mode 100644 index 7a77e57dd4..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-delete-vips.yaml +++ /dev/null @@ -1,25 +0,0 @@ ---- - NeutronLoadbalancerV1.create_and_delete_vips: - - - args: - vip_create_args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 5 - users_per_tenant: 1 - network: {} - lbaas: - pool: {} - quotas: - neutron: - network: -1 - subnet: -1 - pool: -1 - vip: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-list-bgpvpns.json b/samples/tasks/scenarios/neutron/create-and-list-bgpvpns.json deleted file mode 100644 index b336553d26..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-list-bgpvpns.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "NeutronBGPVPN.create_and_list_bgpvpns": [ - { - "args":{}, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-list-bgpvpns.yaml b/samples/tasks/scenarios/neutron/create-and-list-bgpvpns.yaml deleted file mode 100644 index cede19bab5..0000000000 --- 
a/samples/tasks/scenarios/neutron/create-and-list-bgpvpns.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- - NeutronBGPVPN.create_and_list_bgpvpns: - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 \ No newline at end of file diff --git a/samples/tasks/scenarios/neutron/create-and-list-floating-ips.json b/samples/tasks/scenarios/neutron/create-and-list-floating-ips.json deleted file mode 100644 index 20366a9bc5..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-list-floating-ips.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "NeutronNetworks.create_and_list_floating_ips": [ - { - "args": { - "floating_network": "public", - "floating_ip_args": {} - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "floatingip": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-list-floating-ips.yaml b/samples/tasks/scenarios/neutron/create-and-list-floating-ips.yaml deleted file mode 100644 index d320b6cdea..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-list-floating-ips.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - NeutronNetworks.create_and_list_floating_ips: - - - args: - floating_network: "public" - floating_ip_args: {} - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 3 - quotas: - neutron: - floatingip: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-list-healthmonitors.json b/samples/tasks/scenarios/neutron/create-and-list-healthmonitors.json deleted file mode 100644 index 31ae0b45d5..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-list-healthmonitors.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "NeutronLoadbalancerV1.create_and_list_healthmonitors": [ - { - "args": { - "healthmonitor_create_args": {} - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 5, - "users_per_tenant": 2 - }, - "quotas": { - "neutron": { - "health_monitor": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-list-healthmonitors.yaml b/samples/tasks/scenarios/neutron/create-and-list-healthmonitors.yaml deleted file mode 100644 index b67873097d..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-list-healthmonitors.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - NeutronLoadbalancerV1.create_and_list_healthmonitors: - - - args: - healthmonitor_create_args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 5 - users_per_tenant: 2 - quotas: - neutron: - health_monitor: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-list-loadbalancers.json b/samples/tasks/scenarios/neutron/create-and-list-loadbalancers.json deleted file mode 100644 index 2bf67baa80..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-list-loadbalancers.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "NeutronLoadbalancerV2.create_and_list_loadbalancers": [ - { - "args": { - "lb_create_args": {} - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "network": {} - }, - "sla": { - 
"failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-list-loadbalancers.yaml b/samples/tasks/scenarios/neutron/create-and-list-loadbalancers.yaml deleted file mode 100644 index e5b431cbb8..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-list-loadbalancers.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - NeutronLoadbalancerV2.create_and_list_loadbalancers: - - - args: - lb_create_args: {} - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - network: {} - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-list-networks-associations.json b/samples/tasks/scenarios/neutron/create-and-list-networks-associations.json deleted file mode 100644 index d397600b28..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-list-networks-associations.json +++ /dev/null @@ -1,35 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NeutronBGPVPN.create_and_list_networks_associations": [ - { - "args":{}, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "network": {}, - "servers": { - "servers_per_tenant": 1, - "auto_assign_nic": true, - "flavor" : { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-list-networks-associations.yaml b/samples/tasks/scenarios/neutron/create-and-list-networks-associations.yaml deleted file mode 100644 index fb24a25ae2..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-list-networks-associations.yaml +++ /dev/null @@ -1,24 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NeutronBGPVPN.create_and_list_networks_associations: - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - network: {} - servers: - servers_per_tenant: 1 - auto_assign_nic: True - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-list-networks.json b/samples/tasks/scenarios/neutron/create-and-list-networks.json deleted file mode 100644 index 826278c8f6..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-list-networks.json +++ /dev/null @@ -1,59 +0,0 @@ -{ - "NeutronNetworks.create_and_list_networks": [ - { - "args": { - "network_create_args": {} - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "network": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - }, - { - "args": { - "network_create_args": { - "provider:network_type": "vxlan" - } - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "network": -1 - } - }, - "roles": ["admin"] - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-list-networks.yaml b/samples/tasks/scenarios/neutron/create-and-list-networks.yaml deleted file mode 100644 index 4c59fc85e7..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-list-networks.yaml +++ /dev/null @@ -1,39 +0,0 @@ ---- - 
NeutronNetworks.create_and_list_networks: - - - args: - network_create_args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 3 - users_per_tenant: 3 - quotas: - neutron: - network: -1 - sla: - failure_rate: - max: 0 - - - args: - network_create_args: - provider:network_type: "vxlan" - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 3 - users_per_tenant: 3 - quotas: - neutron: - network: -1 - roles: - - "admin" - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-list-pools.json b/samples/tasks/scenarios/neutron/create-and-list-pools.json deleted file mode 100644 index 6ea5cb6601..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-list-pools.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "NeutronLoadbalancerV1.create_and_list_pools": [ - { - "args": { - "pool_create_args":{} - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 3 - }, - "network":{}, - "quotas": { - "neutron": { - "network": -1, - "subnet": -1, - "pool": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-list-pools.yaml b/samples/tasks/scenarios/neutron/create-and-list-pools.yaml deleted file mode 100644 index bce86f1516..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-list-pools.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- - NeutronLoadbalancerV1.create_and_list_pools: - - - args: - pool_create_args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 3 - users_per_tenant: 3 - network: {} - quotas: - neutron: - network: -1 - subnet: -1 - pool: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-list-ports.json b/samples/tasks/scenarios/neutron/create-and-list-ports.json deleted file mode 100644 index 5f9d52bba8..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-list-ports.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "NeutronNetworks.create_and_list_ports": [ - { - "args": { - "network_create_args": {}, - "port_create_args": {}, - "ports_per_network": 10 - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "network": {}, - "users": { - "tenants": 3, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "network": -1, - "port": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-list-ports.yaml b/samples/tasks/scenarios/neutron/create-and-list-ports.yaml deleted file mode 100644 index 24f56eb754..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-list-ports.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- - NeutronNetworks.create_and_list_ports: - - - args: - network_create_args: {} - port_create_args: {} - ports_per_network: 10 - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - network: {} - users: - tenants: 3 - users_per_tenant: 3 - quotas: - neutron: - network: -1 - port: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-list-routers-associations.json b/samples/tasks/scenarios/neutron/create-and-list-routers-associations.json deleted file mode 100644 index d0837561fb..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-list-routers-associations.json +++ /dev/null @@ -1,35 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ 
- "NeutronBGPVPN.create_and_list_routers_associations": [ - { - "args":{}, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "network": {}, - "servers": { - "servers_per_tenant": 1, - "auto_assign_nic": true, - "flavor" : { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-list-routers-associations.yaml b/samples/tasks/scenarios/neutron/create-and-list-routers-associations.yaml deleted file mode 100644 index 000b2e233d..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-list-routers-associations.yaml +++ /dev/null @@ -1,24 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NeutronBGPVPN.create_and_list_routers_associations: - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - network: {} - servers: - servers_per_tenant: 1 - auto_assign_nic: True - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-list-routers.json b/samples/tasks/scenarios/neutron/create-and-list-routers.json deleted file mode 100644 index 352093aed8..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-list-routers.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "NeutronNetworks.create_and_list_routers": [ - { - "args": { - "network_create_args": {}, - "subnet_create_args": {}, - "subnet_cidr_start": "1.1.0.0/30", - "subnets_per_network": 2, - "router_create_args": {} - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "network": {}, - "users": { - "tenants": 3, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "network": -1, - "subnet": -1, - "router": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-list-routers.yaml b/samples/tasks/scenarios/neutron/create-and-list-routers.yaml deleted file mode 100644 index 5adcb1bc50..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-list-routers.yaml +++ /dev/null @@ -1,26 +0,0 @@ ---- - NeutronNetworks.create_and_list_routers: - - - args: - network_create_args: {} - subnet_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - subnets_per_network: 2 - router_create_args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - network: {} - users: - tenants: 3 - users_per_tenant: 3 - quotas: - neutron: - network: -1 - subnet: -1 - router: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-list-security-group-rules.json b/samples/tasks/scenarios/neutron/create-and-list-security-group-rules.json deleted file mode 100644 index a7b0a7cd4e..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-list-security-group-rules.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "NeutronSecurityGroup.create_and_list_security_group_rules": [ - { - "args": { - "security_group_args": {}, - "security_group_rule_args":{} - }, - "runner": { - "type": "constant", - "times": 20, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "security_group": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git 
a/samples/tasks/scenarios/neutron/create-and-list-security-group-rules.yaml b/samples/tasks/scenarios/neutron/create-and-list-security-group-rules.yaml deleted file mode 100644 index f39e46ae5a..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-list-security-group-rules.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - NeutronSecurityGroup.create_and_list_security_group_rules: - - - args: - security_group_args: {} - security_group_rule_args: {} - runner: - type: "constant" - times: 20 - concurrency: 10 - context: - users: - tenants: 3 - users_per_tenant: 3 - quotas: - neutron: - security_group: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-list-security-groups.json b/samples/tasks/scenarios/neutron/create-and-list-security-groups.json deleted file mode 100644 index f66acd0cba..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-list-security-groups.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "NeutronSecurityGroup.create_and_list_security_groups": [ - { - "args": { - "security_group_create_args": {} - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "security_group": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-list-security-groups.yaml b/samples/tasks/scenarios/neutron/create-and-list-security-groups.yaml deleted file mode 100644 index af23868e2e..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-list-security-groups.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - NeutronSecurityGroup.create_and_list_security_groups: - - - args: - security_group_create_args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 3 - users_per_tenant: 3 - quotas: - neutron: - security_group: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-list-subnets.json b/samples/tasks/scenarios/neutron/create-and-list-subnets.json deleted file mode 100644 index bc70b16eee..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-list-subnets.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "NeutronNetworks.create_and_list_subnets": [ - { - "args": { - "network_create_args": {}, - "subnet_create_args": {}, - "subnet_cidr_start": "1.1.0.0/30", - "subnets_per_network": 2 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "network": {}, - "users": { - "tenants": 2, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "network": -1, - "subnet": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-list-subnets.yaml b/samples/tasks/scenarios/neutron/create-and-list-subnets.yaml deleted file mode 100644 index b4be6c448e..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-list-subnets.yaml +++ /dev/null @@ -1,24 +0,0 @@ ---- - NeutronNetworks.create_and_list_subnets: - - - args: - network_create_args: {} - subnet_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - subnets_per_network: 2 - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - network: {} - users: - tenants: 2 - users_per_tenant: 3 - quotas: - neutron: - network: -1 - subnet: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-list-vips.json b/samples/tasks/scenarios/neutron/create-and-list-vips.json deleted file mode 
100644 index ebe7fd3728..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-list-vips.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "NeutronLoadbalancerV1.create_and_list_vips": [ - { - "args": { - "vip_create_args":{} - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 5, - "users_per_tenant": 2 - }, - "network":{}, - "quotas": { - "neutron": { - "network": -1, - "subnet": -1, - "pool": -1, - "vip": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-list-vips.yaml b/samples/tasks/scenarios/neutron/create-and-list-vips.yaml deleted file mode 100644 index 63a62578d9..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-list-vips.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- - NeutronLoadbalancerV1.create_and_list_vips: - - - args: - vip_create_args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 5 - users_per_tenant: 2 - network: {} - quotas: - neutron: - network: -1 - subnet: -1 - pool: -1 - vip: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-show-network.json b/samples/tasks/scenarios/neutron/create-and-show-network.json deleted file mode 100644 index 559fbbcca6..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-show-network.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "NeutronNetworks.create_and_show_network": [ - { - "args": { - "network_create_args": {} - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "network": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-show-network.yaml b/samples/tasks/scenarios/neutron/create-and-show-network.yaml deleted file mode 100644 index 110900230e..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-show-network.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - NeutronNetworks.create_and_show_network: - - - args: - network_create_args: {} - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 3 - quotas: - neutron: - network: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-show-ports.json b/samples/tasks/scenarios/neutron/create-and-show-ports.json deleted file mode 100644 index 8fec44d23e..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-show-ports.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "NeutronNetworks.create_and_show_ports": [ - { - "args": { - "network_create_args": {}, - "port_create_args": {}, - "ports_per_network": 2 - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "network": {}, - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "quotas": { - "neutron": { - "network": -1, - "port": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-show-ports.yaml b/samples/tasks/scenarios/neutron/create-and-show-ports.yaml deleted file mode 100644 index 13fb030806..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-show-ports.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- - NeutronNetworks.create_and_show_ports: - - - args: - network_create_args: {} - port_create_args: {} - ports_per_network: 2 - runner: - type: "constant" - times: 5 - 
concurrency: 2 - context: - network: {} - users: - tenants: 2 - users_per_tenant: 2 - quotas: - neutron: - network: -1 - port: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-show-routers.json b/samples/tasks/scenarios/neutron/create-and-show-routers.json deleted file mode 100644 index c81692a134..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-show-routers.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "NeutronNetworks.create_and_show_routers": [ - { - "args": { - "subnet_cidr_start": "1.1.0.0/30", - "subnets_per_network": 2 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "network": {}, - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "quotas": { - "neutron": { - "network": -1, - "subnet": -1, - "router": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-show-routers.yaml b/samples/tasks/scenarios/neutron/create-and-show-routers.yaml deleted file mode 100644 index 85eaeff280..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-show-routers.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- - NeutronNetworks.create_and_show_routers: - - - args: - subnet_cidr_start: "1.1.0.0/30" - subnets_per_network: 2 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - network: {} - users: - tenants: 2 - users_per_tenant: 2 - quotas: - neutron: - network: -1 - subnet: -1 - router: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-show-security-group-rule.json b/samples/tasks/scenarios/neutron/create-and-show-security-group-rule.json deleted file mode 100644 index 10d1d8f620..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-show-security-group-rule.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "NeutronSecurityGroup.create_and_show_security_group_rule": [ - { - "args": { - "security_group_args": {}, - "security_group_rule_args":{} - }, - "runner": { - "type": "constant", - "times": 20, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "security_group": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-show-security-group-rule.yaml b/samples/tasks/scenarios/neutron/create-and-show-security-group-rule.yaml deleted file mode 100644 index 759ff95d46..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-show-security-group-rule.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - NeutronSecurityGroup.create_and_show_security_group_rule: - - - args: - security_group_args: {} - security_group_rule_args: {} - runner: - type: "constant" - times: 20 - concurrency: 10 - context: - users: - tenants: 3 - users_per_tenant: 3 - quotas: - neutron: - security_group: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-show-security-group.json b/samples/tasks/scenarios/neutron/create-and-show-security-group.json deleted file mode 100644 index b4b54b7bc9..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-show-security-group.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "NeutronSecurityGroup.create_and_show_security_group": [ - { - "args": { - "security_group_create_args": {} - }, - "runner": { - "type": "constant", - "times": 20, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "security_group": -1 - 
} - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-show-security-group.yaml b/samples/tasks/scenarios/neutron/create-and-show-security-group.yaml deleted file mode 100644 index 099274b446..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-show-security-group.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - NeutronSecurityGroup.create_and_show_security_group: - - - args: - security_group_create_args: {} - runner: - type: "constant" - times: 20 - concurrency: 10 - context: - users: - tenants: 3 - users_per_tenant: 3 - quotas: - neutron: - security_group: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-show-subnets.json b/samples/tasks/scenarios/neutron/create-and-show-subnets.json deleted file mode 100644 index da885f93aa..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-show-subnets.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "NeutronNetworks.create_and_show_subnets": [ - { - "args": { - "network_create_args": {}, - "subnet_create_args": {}, - "subnet_cidr_start": "1.1.0.0/30", - "subnets_per_network": 2 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "network": {}, - "users": { - "tenants": 2, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "network": -1, - "subnet": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-show-subnets.yaml b/samples/tasks/scenarios/neutron/create-and-show-subnets.yaml deleted file mode 100644 index f1432ef13f..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-show-subnets.yaml +++ /dev/null @@ -1,24 +0,0 @@ ---- - NeutronNetworks.create_and_show_subnets: - - - args: - network_create_args: {} - subnet_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - subnets_per_network: 2 - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - network: {} - users: - tenants: 2 - users_per_tenant: 3 - quotas: - neutron: - network: -1 - subnet: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-update-bgpvpns.json b/samples/tasks/scenarios/neutron/create-and-update-bgpvpns.json deleted file mode 100644 index 411045e2f0..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-update-bgpvpns.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "NeutronBGPVPN.create_and_update_bgpvpns": [ - { - "args":{}, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-update-bgpvpns.yaml b/samples/tasks/scenarios/neutron/create-and-update-bgpvpns.yaml deleted file mode 100644 index e248ad105d..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-update-bgpvpns.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- - NeutronBGPVPN.create_and_update_bgpvpns: - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-update-healthmonitors.json b/samples/tasks/scenarios/neutron/create-and-update-healthmonitors.json deleted file mode 100644 index b9c7a96cb7..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-update-healthmonitors.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - 
"NeutronLoadbalancerV1.create_and_update_healthmonitors": [ - { - "args": { - "healthmonitor_create_args": {}, - "healthmonitor_update_args": {} - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 5, - "users_per_tenant": 2 - }, - "quotas": { - "neutron": { - "health_monitor": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-update-healthmonitors.yaml b/samples/tasks/scenarios/neutron/create-and-update-healthmonitors.yaml deleted file mode 100644 index 977e944d94..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-update-healthmonitors.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - NeutronLoadbalancerV1.create_and_update_healthmonitors: - - - args: - healthmonitor_create_args: {} - healthmonitor_update_args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 5 - users_per_tenant: 2 - quotas: - neutron: - health_monitor: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-update-networks.json b/samples/tasks/scenarios/neutron/create-and-update-networks.json deleted file mode 100644 index 6b04bc0ace..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-update-networks.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "NeutronNetworks.create_and_update_networks": [ - { - "args": { - "network_update_args": { - "admin_state_up": false, - "name": "_updated" - }, - "network_create_args": {} - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "network": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-update-networks.yaml b/samples/tasks/scenarios/neutron/create-and-update-networks.yaml deleted file mode 100644 index 43934bcfc2..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-update-networks.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- - NeutronNetworks.create_and_update_networks: - - - args: - network_create_args: {} - network_update_args: - admin_state_up: False - name: "_updated" - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 2 - users_per_tenant: 3 - quotas: - neutron: - network: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-update-pools.json b/samples/tasks/scenarios/neutron/create-and-update-pools.json deleted file mode 100644 index ba3f85974f..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-update-pools.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "NeutronLoadbalancerV1.create_and_update_pools": [ - { - "args": { - "pool_create_args":{}, - "pool_update_args":{} - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 3 - }, - "network":{}, - "quotas": { - "neutron": { - "network": -1, - "subnet": -1, - "pool": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-update-pools.yaml b/samples/tasks/scenarios/neutron/create-and-update-pools.yaml deleted file mode 100644 index 3748d01bc6..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-update-pools.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- - NeutronLoadbalancerV1.create_and_update_pools: - - - args: - pool_create_args: 
{} - pool_update_args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 3 - users_per_tenant: 3 - network: {} - quotas: - neutron: - network: -1 - subnet: -1 - pool: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-update-ports.json b/samples/tasks/scenarios/neutron/create-and-update-ports.json deleted file mode 100644 index 228744ca68..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-update-ports.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "NeutronNetworks.create_and_update_ports": [ - { - "args": { - "network_create_args": {}, - "port_create_args": {}, - "port_update_args": { - "admin_state_up": false, - "device_id": "dummy_id", - "device_owner": "dummy_owner", - "name": "_port_updated" - }, - "ports_per_network": 5 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "network": {}, - "users": { - "tenants": 2, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "network": -1, - "port": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-update-ports.yaml b/samples/tasks/scenarios/neutron/create-and-update-ports.yaml deleted file mode 100644 index f315b117fb..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-update-ports.yaml +++ /dev/null @@ -1,28 +0,0 @@ ---- - NeutronNetworks.create_and_update_ports: - - - args: - network_create_args: {} - port_create_args: {} - ports_per_network: 5 - port_update_args: - admin_state_up: False - device_id: "dummy_id" - device_owner: "dummy_owner" - name: "_port_updated" - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - network: {} - users: - tenants: 2 - users_per_tenant: 3 - quotas: - neutron: - network: -1 - port: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-update-routers.json b/samples/tasks/scenarios/neutron/create-and-update-routers.json deleted file mode 100644 index fa2d2a0d2f..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-update-routers.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "NeutronNetworks.create_and_update_routers": [ - { - "args": { - "network_create_args": {}, - "subnet_create_args": {}, - "subnet_cidr_start": "1.1.0.0/30", - "subnets_per_network": 2, - "router_create_args": {}, - "router_update_args": { - "admin_state_up": false, - "name": "_router_updated" - } - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "network": {}, - "users": { - "tenants": 2, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "network": -1, - "subnet": -1, - "router": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-update-routers.yaml b/samples/tasks/scenarios/neutron/create-and-update-routers.yaml deleted file mode 100644 index f68d2477b5..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-update-routers.yaml +++ /dev/null @@ -1,29 +0,0 @@ ---- - NeutronNetworks.create_and_update_routers: - - - args: - network_create_args: {} - subnet_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - subnets_per_network: 2 - router_create_args: {} - router_update_args: - admin_state_up: False - name: "_router_updated" - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - network: {} - users: - tenants: 2 - users_per_tenant: 3 - quotas: - neutron: - network: -1 - subnet: -1 - router: -1 - sla: - 
failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-update-security-groups.json b/samples/tasks/scenarios/neutron/create-and-update-security-groups.json deleted file mode 100644 index 820d9bc7f6..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-update-security-groups.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "NeutronSecurityGroup.create_and_update_security_groups": [ - { - "args": { - "security_group_create_args": {}, - "security_group_update_args": {} - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "security_group": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-update-security-groups.yaml b/samples/tasks/scenarios/neutron/create-and-update-security-groups.yaml deleted file mode 100644 index 8639315728..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-update-security-groups.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - NeutronSecurityGroup.create_and_update_security_groups: - - - args: - security_group_create_args: {} - security_group_update_args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 3 - users_per_tenant: 3 - quotas: - neutron: - security_group: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-update-subnets.json b/samples/tasks/scenarios/neutron/create-and-update-subnets.json deleted file mode 100644 index 267b7e44ac..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-update-subnets.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "NeutronNetworks.create_and_update_subnets": [ - { - "args": { - "subnet_update_args": { - "enable_dhcp": false, - "name": "_subnet_updated" - }, - "network_create_args": {}, - "subnet_create_args": {}, - "subnet_cidr_start": "1.4.0.0/16", - "subnets_per_network": 2 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "network": {}, - "users": { - "tenants": 2, - "users_per_tenant": 3 - }, - "quotas": { - "neutron": { - "network": -1, - "subnet": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-update-subnets.yaml b/samples/tasks/scenarios/neutron/create-and-update-subnets.yaml deleted file mode 100644 index d9d43771ec..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-update-subnets.yaml +++ /dev/null @@ -1,27 +0,0 @@ ---- - NeutronNetworks.create_and_update_subnets: - - - args: - network_create_args: {} - subnet_create_args: {} - subnet_cidr_start: "1.4.0.0/16" - subnets_per_network: 2 - subnet_update_args: - enable_dhcp: False - name: "_subnet_updated" - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - network: {} - users: - tenants: 2 - users_per_tenant: 3 - quotas: - neutron: - network: -1 - subnet: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/create-and-update-vips.json b/samples/tasks/scenarios/neutron/create-and-update-vips.json deleted file mode 100644 index be2df1b087..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-update-vips.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "NeutronLoadbalancerV1.create_and_update_vips": [ - { - "args": { - "vip_create_args":{}, - "vip_update_args":{} - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - 
"tenants": 1, - "users_per_tenant": 1 - }, - "network": {}, - "quotas": { - "neutron": { - "network": -1, - "subnet": -1, - "pool": -1, - "vip": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/create-and-update-vips.yaml b/samples/tasks/scenarios/neutron/create-and-update-vips.yaml deleted file mode 100644 index 8d3e2644ee..0000000000 --- a/samples/tasks/scenarios/neutron/create-and-update-vips.yaml +++ /dev/null @@ -1,24 +0,0 @@ ---- - NeutronLoadbalancerV1.create_and_update_vips: - - - args: - vip_create_args: {} - vip_update_args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - network: {} - quotas: - neutron: - network: -1 - subnet: -1 - pool: -1 - vip: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/delete-subnets.json b/samples/tasks/scenarios/neutron/delete-subnets.json deleted file mode 100644 index 04070a7d0a..0000000000 --- a/samples/tasks/scenarios/neutron/delete-subnets.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "NeutronSubnets.delete_subnets": [ - { - "runner": { - "type": "constant", - "times": 15, - "concurrency": 15 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 15, - "user_choice_method": "round_robin" - }, - "network": { - "subnets_per_network": 15, - "dualstack": true, - "router": {} - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/delete-subnets.yaml b/samples/tasks/scenarios/neutron/delete-subnets.yaml deleted file mode 100644 index 9e73f6cd64..0000000000 --- a/samples/tasks/scenarios/neutron/delete-subnets.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - NeutronSubnets.delete_subnets: - - - runner: - type: "constant" - times: 15 - concurrency: 15 - context: - users: - tenants: 1 - users_per_tenant: 15 - user_choice_method: "round_robin" - network: - subnets_per_network: 15 - dualstack: true - router: {} - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/list-agents.json b/samples/tasks/scenarios/neutron/list-agents.json deleted file mode 100644 index e9fbcb6bef..0000000000 --- a/samples/tasks/scenarios/neutron/list-agents.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "NeutronNetworks.list_agents": [ - { - "args": { - "agent_args": {} - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 3 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/list-agents.yaml b/samples/tasks/scenarios/neutron/list-agents.yaml deleted file mode 100644 index b1d15cb0bf..0000000000 --- a/samples/tasks/scenarios/neutron/list-agents.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - NeutronNetworks.list_agents: - - - args: - agent_args: {} - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 3 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/neutron/set-and-clear-router-gateway.json b/samples/tasks/scenarios/neutron/set-and-clear-router-gateway.json deleted file mode 100644 index 5a0cbfb959..0000000000 --- a/samples/tasks/scenarios/neutron/set-and-clear-router-gateway.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "NeutronNetworks.set_and_clear_router_gateway": [ - { - "args": { - "network_create_args": { - "router:external": true - }, - "router_create_args": {} - }, - "runner": { - "type": "constant", 
- "times": 4, - "concurrency": 2 - }, - "context": { - "network": {}, - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "quotas": { - "neutron": { - "network": -1, - "router": -1 - } - }, - "roles": ["admin"] - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/neutron/set-and-clear-router-gateway.yaml b/samples/tasks/scenarios/neutron/set-and-clear-router-gateway.yaml deleted file mode 100644 index 27af687398..0000000000 --- a/samples/tasks/scenarios/neutron/set-and-clear-router-gateway.yaml +++ /dev/null @@ -1,25 +0,0 @@ ---- - NeutronNetworks.set_and_clear_router_gateway: - - - args: - network_create_args: - router:external: True - router_create_args: {} - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - network: {} - users: - tenants: 2 - users_per_tenant: 2 - quotas: - neutron: - network: -1 - router: -1 - roles: - - "admin" - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/boot-and-associate-floating-ip.json b/samples/tasks/scenarios/nova/boot-and-associate-floating-ip.json deleted file mode 100644 index b1e2c7095e..0000000000 --- a/samples/tasks/scenarios/nova/boot-and-associate-floating-ip.json +++ /dev/null @@ -1,62 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.boot_and_associate_floating_ip": [ - { - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - } - }, - "context": { - "users": { - "users_per_tenant": 1, - "tenants": 1 - }, - "network": {} - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - }, - { - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "create_floating_ip_args": { - "ext_network": "ext_network_name" - } - }, - "context": { - "users": { - "users_per_tenant": 1, - "tenants": 1 - }, - "network": {} - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-and-associate-floating-ip.yaml b/samples/tasks/scenarios/nova/boot-and-associate-floating-ip.yaml deleted file mode 100644 index deb5e160af..0000000000 --- a/samples/tasks/scenarios/nova/boot-and-associate-floating-ip.yaml +++ /dev/null @@ -1,41 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.boot_and_associate_floating_ip: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - network: {} - sla: - failure_rate: - max: 0 - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - create_floating_ip_args: - ext_network: "ext_network_name" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - network: {} - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/boot-and-block-migrate.json b/samples/tasks/scenarios/nova/boot-and-block-migrate.json deleted file mode 100644 index 3cd1940277..0000000000 --- a/samples/tasks/scenarios/nova/boot-and-block-migrate.json +++ /dev/null @@ -1,32 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.boot_and_live_migrate_server": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - 
"block_migration": true - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-and-block-migrate.yaml b/samples/tasks/scenarios/nova/boot-and-block-migrate.yaml deleted file mode 100644 index e158a60da8..0000000000 --- a/samples/tasks/scenarios/nova/boot-and-block-migrate.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.boot_and_live_migrate_server: - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - block_migration: true - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/boot-and-delete-multiple.json b/samples/tasks/scenarios/nova/boot-and-delete-multiple.json deleted file mode 100644 index af6b0c6797..0000000000 --- a/samples/tasks/scenarios/nova/boot-and-delete-multiple.json +++ /dev/null @@ -1,32 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.boot_and_delete_multiple_servers": [ - { - "runner": { - "type": "constant", - "concurrency": 1, - "times": 1 - }, - "args": { - "count": 5, - "image": { - "name": "^cirros.*-disk$" - }, - "flavor": { - "name": "{{flavor_name}}" - } - }, - "context": { - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-and-delete-multiple.yaml b/samples/tasks/scenarios/nova/boot-and-delete-multiple.yaml deleted file mode 100644 index 3cb87702c7..0000000000 --- a/samples/tasks/scenarios/nova/boot-and-delete-multiple.yaml +++ /dev/null @@ -1,21 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.boot_and_delete_multiple_servers: - - - args: - image: - name: "^cirros.*-disk$" - flavor: - name: "{{flavor_name}}" - count: 5 - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/boot-and-delete-server-with-keypairs.json b/samples/tasks/scenarios/nova/boot-and-delete-server-with-keypairs.json deleted file mode 100644 index 39298ba066..0000000000 --- a/samples/tasks/scenarios/nova/boot-and-delete-server-with-keypairs.json +++ /dev/null @@ -1,35 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaKeypair.boot_and_delete_server_with_keypair": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "boot_server_kwargs": {} - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 1 - }, - "network": { - "start_cidr": "100.1.0.0/26" - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-and-delete-server-with-keypairs.yaml b/samples/tasks/scenarios/nova/boot-and-delete-server-with-keypairs.yaml deleted file mode 100644 index bc3058d7ad..0000000000 --- a/samples/tasks/scenarios/nova/boot-and-delete-server-with-keypairs.yaml +++ /dev/null @@ -1,23 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaKeypair.boot_and_delete_server_with_keypair: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: 
"^cirros.*-disk$" - boot_server_kwargs: {} - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 1 - network: - start_cidr: "100.1.0.0/26" - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/boot-and-delete.json b/samples/tasks/scenarios/nova/boot-and-delete.json deleted file mode 100644 index 8536ee38aa..0000000000 --- a/samples/tasks/scenarios/nova/boot-and-delete.json +++ /dev/null @@ -1,63 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.boot_and_delete_server": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "force_delete": false - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - }, - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "auto_assign_nic": true - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - }, - "network": { - "start_cidr": "10.2.0.0/24", - "networks_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-and-delete.yaml b/samples/tasks/scenarios/nova/boot-and-delete.yaml deleted file mode 100644 index c4588432bf..0000000000 --- a/samples/tasks/scenarios/nova/boot-and-delete.yaml +++ /dev/null @@ -1,42 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.boot_and_delete_server: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - force_delete: false - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - auto_assign_nic: true - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - network: - start_cidr: "10.2.0.0/24" - networks_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/boot-and-get-console-server.json b/samples/tasks/scenarios/nova/boot-and-get-console-server.json deleted file mode 100644 index d89a234dd3..0000000000 --- a/samples/tasks/scenarios/nova/boot-and-get-console-server.json +++ /dev/null @@ -1,32 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set image_name = image_name or "^cirros.*-disk$" %} -{ - "NovaServers.boot_and_get_console_output": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "{{image_name}}" - } - }, - "runner": { - "type": "constant", - "times": 1, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-and-get-console-server.yaml b/samples/tasks/scenarios/nova/boot-and-get-console-server.yaml deleted file mode 100644 index d1bfa41e04..0000000000 --- a/samples/tasks/scenarios/nova/boot-and-get-console-server.yaml +++ /dev/null @@ -1,21 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set image_name = image_name or "^cirros.*-disk$" %} ---- - NovaServers.boot_and_get_console_output: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: 
"{{image_name}}" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/boot-and-get-console-url.json b/samples/tasks/scenarios/nova/boot-and-get-console-url.json deleted file mode 100644 index 7f34131a77..0000000000 --- a/samples/tasks/scenarios/nova/boot-and-get-console-url.json +++ /dev/null @@ -1,32 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.boot_and_get_console_url": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "console_type": "novnc" - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-and-get-console-url.yaml b/samples/tasks/scenarios/nova/boot-and-get-console-url.yaml deleted file mode 100644 index b1c930a3be..0000000000 --- a/samples/tasks/scenarios/nova/boot-and-get-console-url.yaml +++ /dev/null @@ -1,21 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.boot_and_get_console_url: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - console_type: "novnc" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/boot-and-list.json b/samples/tasks/scenarios/nova/boot-and-list.json deleted file mode 100644 index b852a6eda1..0000000000 --- a/samples/tasks/scenarios/nova/boot-and-list.json +++ /dev/null @@ -1,33 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.boot_and_list_server": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "detailed": true - }, - "runner": { - "type": "constant", - "times": 1, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} - diff --git a/samples/tasks/scenarios/nova/boot-and-list.yaml b/samples/tasks/scenarios/nova/boot-and-list.yaml deleted file mode 100644 index a4a1b85398..0000000000 --- a/samples/tasks/scenarios/nova/boot-and-list.yaml +++ /dev/null @@ -1,21 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.boot_and_list_server: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - detailed: True - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/boot-and-live-migrate.json b/samples/tasks/scenarios/nova/boot-and-live-migrate.json deleted file mode 100644 index b9f3abbc17..0000000000 --- a/samples/tasks/scenarios/nova/boot-and-live-migrate.json +++ /dev/null @@ -1,32 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.boot_and_live_migrate_server": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "block_migration": false - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git 
a/samples/tasks/scenarios/nova/boot-and-live-migrate.yaml b/samples/tasks/scenarios/nova/boot-and-live-migrate.yaml deleted file mode 100644 index f30053b28b..0000000000 --- a/samples/tasks/scenarios/nova/boot-and-live-migrate.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.boot_and_live_migrate_server: - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - block_migration: false - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/boot-and-migrate.json b/samples/tasks/scenarios/nova/boot-and-migrate.json deleted file mode 100644 index 44dd857197..0000000000 --- a/samples/tasks/scenarios/nova/boot-and-migrate.json +++ /dev/null @@ -1,31 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.boot_and_migrate_server": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - } - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-and-migrate.yaml b/samples/tasks/scenarios/nova/boot-and-migrate.yaml deleted file mode 100644 index 9ff0bda758..0000000000 --- a/samples/tasks/scenarios/nova/boot-and-migrate.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.boot_and_migrate_server: - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/boot-and-rebuild.json b/samples/tasks/scenarios/nova/boot-and-rebuild.json deleted file mode 100644 index 5e2ad141df..0000000000 --- a/samples/tasks/scenarios/nova/boot-and-rebuild.json +++ /dev/null @@ -1,34 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.boot_and_rebuild_server": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "from_image": { - "name": "^cirros.*-disk$" - }, - "to_image": { - "name": "^cirros.*-disk$" - } - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-and-rebuild.yaml b/samples/tasks/scenarios/nova/boot-and-rebuild.yaml deleted file mode 100644 index faf131d69d..0000000000 --- a/samples/tasks/scenarios/nova/boot-and-rebuild.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.boot_and_rebuild_server: - - - args: - flavor: - name: "{{flavor_name}}" - from_image: - name: "^cirros.*-disk$" - to_image: - name: "^cirros.*-disk$" - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/boot-and-show-server.json b/samples/tasks/scenarios/nova/boot-and-show-server.json deleted file mode 100644 index 8f7e4ef6f8..0000000000 --- a/samples/tasks/scenarios/nova/boot-and-show-server.json +++ /dev/null @@ -1,32 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% 
set image_name = image_name or "^cirros.*-disk$" %} -{ - "NovaServers.boot_and_show_server": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "{{image_name}}" - } - }, - "runner": { - "type": "constant", - "times": 1, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-and-show-server.yaml b/samples/tasks/scenarios/nova/boot-and-show-server.yaml deleted file mode 100644 index 9a4082938f..0000000000 --- a/samples/tasks/scenarios/nova/boot-and-show-server.yaml +++ /dev/null @@ -1,21 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set image_name = image_name or "^cirros.*-disk$" %} ---- - NovaServers.boot_and_show_server: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "{{image_name}}" - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/boot-and-update-server.json b/samples/tasks/scenarios/nova/boot-and-update-server.json deleted file mode 100644 index b854599fcc..0000000000 --- a/samples/tasks/scenarios/nova/boot-and-update-server.json +++ /dev/null @@ -1,31 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.boot_and_update_server": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - } - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-and-update-server.yaml b/samples/tasks/scenarios/nova/boot-and-update-server.yaml deleted file mode 100644 index 636dc227e7..0000000000 --- a/samples/tasks/scenarios/nova/boot-and-update-server.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.boot_and_update_server: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/boot-bounce-delete.json b/samples/tasks/scenarios/nova/boot-bounce-delete.json deleted file mode 100644 index 153718d880..0000000000 --- a/samples/tasks/scenarios/nova/boot-bounce-delete.json +++ /dev/null @@ -1,38 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.boot_and_bounce_server": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "force_delete": false, - "actions": [ - {"hard_reboot": 1}, - {"soft_reboot": 1}, - {"stop_start": 1}, - {"rescue_unrescue": 1} - ] - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-bounce-delete.yaml b/samples/tasks/scenarios/nova/boot-bounce-delete.yaml deleted file mode 100644 index 058a96e53b..0000000000 --- a/samples/tasks/scenarios/nova/boot-bounce-delete.yaml +++ /dev/null @@ -1,30 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.boot_and_bounce_server: - - - args: - 
flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - force_delete: false - actions: - - - hard_reboot: 1 - - - soft_reboot: 1 - - - stop_start: 1 - - - rescue_unrescue: 1 - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/boot-from-volume-and-delete.json b/samples/tasks/scenarios/nova/boot-from-volume-and-delete.json deleted file mode 100755 index ededc1e707..0000000000 --- a/samples/tasks/scenarios/nova/boot-from-volume-and-delete.json +++ /dev/null @@ -1,35 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set volume_type = volume_type or "" %} -{ - "NovaServers.boot_server_from_volume_and_delete": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "volume_size": 10, - "volume_type": "{{volume_type}}", - "force_delete": false - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-from-volume-and-delete.yaml b/samples/tasks/scenarios/nova/boot-from-volume-and-delete.yaml deleted file mode 100755 index f4c5a9ad80..0000000000 --- a/samples/tasks/scenarios/nova/boot-from-volume-and-delete.yaml +++ /dev/null @@ -1,24 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set volume_type = volume_type or "" %} ---- - NovaServers.boot_server_from_volume_and_delete: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - volume_size: 10 - volume_type: "{{volume_type}}" - force_delete: false - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/boot-from-volume-and-resize.json b/samples/tasks/scenarios/nova/boot-from-volume-and-resize.json deleted file mode 100644 index aeb5559290..0000000000 --- a/samples/tasks/scenarios/nova/boot-from-volume-and-resize.json +++ /dev/null @@ -1,40 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.boot_server_from_volume_and_resize": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "to_flavor": { - "name": "m1.small" - }, - "confirm": true, - "volume_size": 1, - "force_delete": false, - "do_delete": true, - "boot_server_kwargs": {}, - "create_volume_kwargs": {} - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-from-volume-and-resize.yaml b/samples/tasks/scenarios/nova/boot-from-volume-and-resize.yaml deleted file mode 100644 index 4ab956893d..0000000000 --- a/samples/tasks/scenarios/nova/boot-from-volume-and-resize.yaml +++ /dev/null @@ -1,28 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.boot_server_from_volume_and_resize: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - to_flavor: - name: "m1.small" - confirm: true - volume_size: 1 - force_delete: false - do_delete: true - boot_server_kwargs: {} - create_volume_kwargs: {} - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - 
tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/boot-from-volume-snapshot.json b/samples/tasks/scenarios/nova/boot-from-volume-snapshot.json deleted file mode 100755 index ee3090aed6..0000000000 --- a/samples/tasks/scenarios/nova/boot-from-volume-snapshot.json +++ /dev/null @@ -1,34 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set volume_type = volume_type or "" %} -{ - "NovaServers.boot_server_from_volume_snapshot": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "volume_size": 10, - "volume_type": "{{volume_type}}" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-from-volume-snapshot.yaml b/samples/tasks/scenarios/nova/boot-from-volume-snapshot.yaml deleted file mode 100755 index f63bf3e41f..0000000000 --- a/samples/tasks/scenarios/nova/boot-from-volume-snapshot.yaml +++ /dev/null @@ -1,23 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set volume_type = volume_type or "" %} ---- - NovaServers.boot_server_from_volume_snapshot: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - volume_size: 10 - volume_type: "{{volume_type}}" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/boot-from-volume.json b/samples/tasks/scenarios/nova/boot-from-volume.json deleted file mode 100755 index 1071e631a6..0000000000 --- a/samples/tasks/scenarios/nova/boot-from-volume.json +++ /dev/null @@ -1,34 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set volume_type = volume_type or "" %} -{ - "NovaServers.boot_server_from_volume": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "volume_size": 10, - "volume_type": "{{volume_type}}" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-from-volume.yaml b/samples/tasks/scenarios/nova/boot-from-volume.yaml deleted file mode 100755 index e0a24f1a9f..0000000000 --- a/samples/tasks/scenarios/nova/boot-from-volume.yaml +++ /dev/null @@ -1,23 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set volume_type = volume_type or "" %} ---- - NovaServers.boot_server_from_volume: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - volume_size: 10 - volume_type: "{{volume_type}}" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/boot-lock-unlock-and-delete.json b/samples/tasks/scenarios/nova/boot-lock-unlock-and-delete.json deleted file mode 100644 index 102a130119..0000000000 --- a/samples/tasks/scenarios/nova/boot-lock-unlock-and-delete.json +++ /dev/null @@ -1,31 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.boot_lock_unlock_and_delete": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - } - }, 
- "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-lock-unlock-and-delete.yaml b/samples/tasks/scenarios/nova/boot-lock-unlock-and-delete.yaml deleted file mode 100644 index e0d226bbf3..0000000000 --- a/samples/tasks/scenarios/nova/boot-lock-unlock-and-delete.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.boot_lock_unlock_and_delete: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/boot-server-and-attach-interface.json b/samples/tasks/scenarios/nova/boot-server-and-attach-interface.json deleted file mode 100644 index 09a964f82d..0000000000 --- a/samples/tasks/scenarios/nova/boot-server-and-attach-interface.json +++ /dev/null @@ -1,43 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set image_name = "^(cirros.*-disk|TestVM)$" %} -{ - "NovaServers.boot_server_and_attach_interface": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "{{image_name}}" - }, - "network_create_args": {}, - "subnet_create_args": {}, - "subnet_cidr_start": "1.1.0.0/30", - "boot_server_args": {} - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "network": {}, - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "quotas": { - "neutron": { - "network": -1, - "subnet": -1 - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-server-and-attach-interface.yaml b/samples/tasks/scenarios/nova/boot-server-and-attach-interface.yaml deleted file mode 100644 index e8d3ddd861..0000000000 --- a/samples/tasks/scenarios/nova/boot-server-and-attach-interface.yaml +++ /dev/null @@ -1,30 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set image_name = "^(cirros.*-disk|TestVM)$" %} ---- - NovaServers.boot_server_and_attach_interface: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "{{image_name}}" - network_create_args: {} - subnet_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - boot_server_args: {} - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - network: {} - users: - tenants: 2 - users_per_tenant: 2 - quotas: - neutron: - network: -1 - subnet: -1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/boot-server-and-list-interfaces.json b/samples/tasks/scenarios/nova/boot-server-and-list-interfaces.json deleted file mode 100644 index 6a03128ba0..0000000000 --- a/samples/tasks/scenarios/nova/boot-server-and-list-interfaces.json +++ /dev/null @@ -1,35 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set image_name = "^(cirros.*-disk|TestVM)$" %} -{ - "NovaServers.boot_server_and_list_interfaces": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "{{image_name}}" - } - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "network": { - "start_cidr": "100.1.0.0/26" - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git 
a/samples/tasks/scenarios/nova/boot-server-and-list-interfaces.yaml b/samples/tasks/scenarios/nova/boot-server-and-list-interfaces.yaml deleted file mode 100644 index 7439252f19..0000000000 --- a/samples/tasks/scenarios/nova/boot-server-and-list-interfaces.yaml +++ /dev/null @@ -1,23 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set image_name = "^(cirros.*-disk|TestVM)$" %} ---- - NovaServers.boot_server_and_list_interfaces: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "{{image_name}}" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - network: - start_cidr: "100.1.0.0/26" - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/boot-server-associate-and-dissociate-floating-ip.json b/samples/tasks/scenarios/nova/boot-server-associate-and-dissociate-floating-ip.json deleted file mode 100644 index 670bebd78e..0000000000 --- a/samples/tasks/scenarios/nova/boot-server-associate-and-dissociate-floating-ip.json +++ /dev/null @@ -1,62 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.boot_server_associate_and_dissociate_floating_ip": [ - { - "runner": { - "type": "constant", - "concurrency": 2, - "times": 5 - }, - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - } - }, - "context": { - "users": { - "users_per_tenant": 2, - "tenants": 3 - }, - "network": {} - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - }, - { - "runner": { - "type": "constant", - "concurrency": 2, - "times": 5 - }, - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "create_floating_ip_args": { - "ext_network": "ext_network_name" - } - }, - "context": { - "users": { - "users_per_tenant": 2, - "tenants": 3 - }, - "network": {} - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-server-associate-and-dissociate-floating-ip.yaml b/samples/tasks/scenarios/nova/boot-server-associate-and-dissociate-floating-ip.yaml deleted file mode 100644 index a89c98fe4e..0000000000 --- a/samples/tasks/scenarios/nova/boot-server-associate-and-dissociate-floating-ip.yaml +++ /dev/null @@ -1,42 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.boot_server_associate_and_dissociate_floating_ip: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - network: {} - sla: - failure_rate: - max: 0 - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - create_floating_ip_args: - ext_network: "ext_network_name" - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - network: {} - sla: - failure_rate: - max: 0 - diff --git a/samples/tasks/scenarios/nova/boot-server-attach-created-volume-and-live-migrate.json b/samples/tasks/scenarios/nova/boot-server-attach-created-volume-and-live-migrate.json deleted file mode 100644 index 7a6582dc9a..0000000000 --- a/samples/tasks/scenarios/nova/boot-server-attach-created-volume-and-live-migrate.json +++ /dev/null @@ -1,35 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.boot_server_attach_created_volume_and_live_migrate": [ - { - "args": { - "size": 10, - "block_migration": false, - "image": { - "name": "^cirros.*-disk$" - }, - 
"flavor": { - "name": "{{flavor_name}}" - }, - "boot_server_kwargs": {}, - "create_volume_kwargs": {} - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-server-attach-created-volume-and-live-migrate.yaml b/samples/tasks/scenarios/nova/boot-server-attach-created-volume-and-live-migrate.yaml deleted file mode 100644 index ac27fba25a..0000000000 --- a/samples/tasks/scenarios/nova/boot-server-attach-created-volume-and-live-migrate.yaml +++ /dev/null @@ -1,23 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.boot_server_attach_created_volume_and_live_migrate: - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - size: 10 - block_migration: false - boot_server_kwargs: {} - create_volume_kwargs: {} - runner: - type: "constant" - times: 5 - concurrency: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/boot-server-attach-created-volume-and-resize.json b/samples/tasks/scenarios/nova/boot-server-attach-created-volume-and-resize.json deleted file mode 100644 index e29fd4d1ac..0000000000 --- a/samples/tasks/scenarios/nova/boot-server-attach-created-volume-and-resize.json +++ /dev/null @@ -1,40 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.boot_server_attach_created_volume_and_resize": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "to_flavor": { - "name": "m1.small" - }, - "confirm": true, - "volume_size": 1, - "force_delete": false, - "do_delete": true, - "boot_server_kwargs": {}, - "create_volume_kwargs": {} - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-server-attach-created-volume-and-resize.yaml b/samples/tasks/scenarios/nova/boot-server-attach-created-volume-and-resize.yaml deleted file mode 100644 index b5e0ab43d6..0000000000 --- a/samples/tasks/scenarios/nova/boot-server-attach-created-volume-and-resize.yaml +++ /dev/null @@ -1,28 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.boot_server_attach_created_volume_and_resize: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - to_flavor: - name: "m1.small" - confirm: true - volume_size: 1 - force_delete: false - do_delete: true - boot_server_kwargs: {} - create_volume_kwargs: {} - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/boot-server-attach-volume-and-list-attachments.json b/samples/tasks/scenarios/nova/boot-server-attach-volume-and-list-attachments.json deleted file mode 100644 index 5cef165da9..0000000000 --- a/samples/tasks/scenarios/nova/boot-server-attach-volume-and-list-attachments.json +++ /dev/null @@ -1,36 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set image_name = "^(cirros.*-disk|TestVM)$" %} -{ - "NovaServers.boot_server_attach_volume_and_list_attachments": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "{{image_name}}" - 
}, - "volume_size": 1, - "volume_num": 2, - "boot_server_kwargs": {}, - "create_volume_kwargs": {} - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-server-attach-volume-and-list-attachments.yaml b/samples/tasks/scenarios/nova/boot-server-attach-volume-and-list-attachments.yaml deleted file mode 100644 index a734ffa74f..0000000000 --- a/samples/tasks/scenarios/nova/boot-server-attach-volume-and-list-attachments.yaml +++ /dev/null @@ -1,25 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set image_name = "^(cirros.*-disk|TestVM)$" %} ---- - NovaServers.boot_server_attach_volume_and_list_attachments: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "{{image_name}}" - volume_size: 1 - volume_num: 2 - boot_server_kwargs: {} - create_volume_kwargs: {} - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/boot-server-from-volume-and-live-migrate.json b/samples/tasks/scenarios/nova/boot-server-from-volume-and-live-migrate.json deleted file mode 100755 index 9f71dc4dbc..0000000000 --- a/samples/tasks/scenarios/nova/boot-server-from-volume-and-live-migrate.json +++ /dev/null @@ -1,36 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set volume_type = volume_type or "" %} -{ - "NovaServers.boot_server_from_volume_and_live_migrate": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "block_migration": false, - "volume_size": 10, - "volume_type": "{{volume_type}}", - "force_delete": false - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-server-from-volume-and-live-migrate.yaml b/samples/tasks/scenarios/nova/boot-server-from-volume-and-live-migrate.yaml deleted file mode 100755 index 1fcef49258..0000000000 --- a/samples/tasks/scenarios/nova/boot-server-from-volume-and-live-migrate.yaml +++ /dev/null @@ -1,24 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{% set volume_type = volume_type or "" %} ---- - NovaServers.boot_server_from_volume_and_live_migrate: - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - block_migration: false - volume_size: 10 - volume_type: "{{volume_type}}" - force_delete: false - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/boot-snapshot-boot-delete.json b/samples/tasks/scenarios/nova/boot-snapshot-boot-delete.json deleted file mode 100644 index e4ec761a47..0000000000 --- a/samples/tasks/scenarios/nova/boot-snapshot-boot-delete.json +++ /dev/null @@ -1,32 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.snapshot_server": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "force_delete": false - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - 
"failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot-snapshot-boot-delete.yaml b/samples/tasks/scenarios/nova/boot-snapshot-boot-delete.yaml deleted file mode 100644 index 106eb536ea..0000000000 --- a/samples/tasks/scenarios/nova/boot-snapshot-boot-delete.yaml +++ /dev/null @@ -1,21 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.snapshot_server: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - force_delete: false - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/boot.json b/samples/tasks/scenarios/nova/boot.json deleted file mode 100644 index cd34f41269..0000000000 --- a/samples/tasks/scenarios/nova/boot.json +++ /dev/null @@ -1,31 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.boot_server": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - } - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/boot.yaml b/samples/tasks/scenarios/nova/boot.yaml deleted file mode 100644 index 1591fbff91..0000000000 --- a/samples/tasks/scenarios/nova/boot.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.boot_server: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/create-aggregate-add-and-remove-host.json b/samples/tasks/scenarios/nova/create-aggregate-add-and-remove-host.json deleted file mode 100644 index 8f52fca2dc..0000000000 --- a/samples/tasks/scenarios/nova/create-aggregate-add-and-remove-host.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "NovaAggregates.create_aggregate_add_and_remove_host": [ - { - "args": { - "availability_zone": "nova" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/create-aggregate-add-and-remove-host.yaml b/samples/tasks/scenarios/nova/create-aggregate-add-and-remove-host.yaml deleted file mode 100644 index ae3ccb067d..0000000000 --- a/samples/tasks/scenarios/nova/create-aggregate-add-and-remove-host.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - NovaAggregates.create_aggregate_add_and_remove_host: - - - args: - availability_zone: "nova" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/create-aggregate-add-host-and-boot-server.json b/samples/tasks/scenarios/nova/create-aggregate-add-host-and-boot-server.json deleted file mode 100644 index 5d86a06505..0000000000 --- a/samples/tasks/scenarios/nova/create-aggregate-add-host-and-boot-server.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "NovaAggregates.create_aggregate_add_host_and_boot_server": [ - { - "args": { - "image": { - "name": "^cirros.*-disk$" - }, - "metadata": { - "test_metadata": "true" - }, 
- "availability_zone": "nova", - "ram": 512, - "vcpus": 1, - "disk": 1, - "boot_server_kwargs": {} - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/create-aggregate-add-host-and-boot-server.yaml b/samples/tasks/scenarios/nova/create-aggregate-add-host-and-boot-server.yaml deleted file mode 100644 index b0d5886556..0000000000 --- a/samples/tasks/scenarios/nova/create-aggregate-add-host-and-boot-server.yaml +++ /dev/null @@ -1,24 +0,0 @@ ---- - NovaAggregates.create_aggregate_add_host_and_boot_server: - - - args: - image: - name: "^cirros.*-disk$" - metadata: - test_metadata: "true" - availability_zone: "nova" - ram: 512 - vcpus: 1 - disk: 1 - boot_server_kwargs: {} - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/create-and-delete-aggregate.json b/samples/tasks/scenarios/nova/create-and-delete-aggregate.json deleted file mode 100644 index b02d58f94c..0000000000 --- a/samples/tasks/scenarios/nova/create-and-delete-aggregate.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "NovaAggregates.create_and_delete_aggregate": [ - { - "args": { - "availability_zone": "nova" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/create-and-delete-aggregate.yaml b/samples/tasks/scenarios/nova/create-and-delete-aggregate.yaml deleted file mode 100644 index e442639b50..0000000000 --- a/samples/tasks/scenarios/nova/create-and-delete-aggregate.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - NovaAggregates.create_and_delete_aggregate: - - - args: - availability_zone: "nova" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/create-and-delete-flavor.json b/samples/tasks/scenarios/nova/create-and-delete-flavor.json deleted file mode 100644 index 33ce7826cf..0000000000 --- a/samples/tasks/scenarios/nova/create-and-delete-flavor.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "NovaFlavors.create_and_delete_flavor": [ - { - "runner": { - "type": "constant", - "concurrency": 2, - "times": 10 - }, - "args": { - "ram": 500, - "vcpus" : 1, - "disk": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/create-and-delete-flavor.yaml b/samples/tasks/scenarios/nova/create-and-delete-flavor.yaml deleted file mode 100644 index 35d7e7b6e3..0000000000 --- a/samples/tasks/scenarios/nova/create-and-delete-flavor.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- - NovaFlavors.create_and_delete_flavor: - - - runner: - type: "constant" - concurrency: 2 - times: 10 - args: - ram: 500 - vcpus : 1 - disk: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/create-and-delete-keypair.json b/samples/tasks/scenarios/nova/create-and-delete-keypair.json deleted file mode 100644 index d163953d81..0000000000 --- a/samples/tasks/scenarios/nova/create-and-delete-keypair.json +++ 
/dev/null @@ -1,22 +0,0 @@ -{ - "NovaKeypair.create_and_delete_keypair": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/create-and-delete-keypair.yaml b/samples/tasks/scenarios/nova/create-and-delete-keypair.yaml deleted file mode 100644 index 181609f75c..0000000000 --- a/samples/tasks/scenarios/nova/create-and-delete-keypair.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - NovaKeypair.create_and_delete_keypair: - - - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/create-and-delete-server-group.json b/samples/tasks/scenarios/nova/create-and-delete-server-group.json deleted file mode 100644 index 7f8211ece7..0000000000 --- a/samples/tasks/scenarios/nova/create-and-delete-server-group.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "NovaServerGroups.create_and_delete_server_group": [ - { - "args": { - "policies": [ - "affinity" - ] - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "runner": { - "concurrency": 2, - "times": 4, - "type": "constant" - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/create-and-delete-server-group.yaml b/samples/tasks/scenarios/nova/create-and-delete-server-group.yaml deleted file mode 100644 index 47fd71122b..0000000000 --- a/samples/tasks/scenarios/nova/create-and-delete-server-group.yaml +++ /dev/null @@ -1,15 +0,0 @@ - NovaServerGroups.create_and_delete_server_group: - - - args: - policies: ["affinity"] - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/create-and-get-aggregate-details.json b/samples/tasks/scenarios/nova/create-and-get-aggregate-details.json deleted file mode 100644 index 45efe31ea4..0000000000 --- a/samples/tasks/scenarios/nova/create-and-get-aggregate-details.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "NovaAggregates.create_and_get_aggregate_details": [ - { - "args": { - "availability_zone": "nova" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/create-and-get-aggregate-details.yaml b/samples/tasks/scenarios/nova/create-and-get-aggregate-details.yaml deleted file mode 100644 index 1bf4fefdc8..0000000000 --- a/samples/tasks/scenarios/nova/create-and-get-aggregate-details.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - NovaAggregates.create_and_get_aggregate_details: - - - args: - availability_zone: "nova" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/create-and-get-flavor.json b/samples/tasks/scenarios/nova/create-and-get-flavor.json deleted file mode 100644 index e75e46f2ea..0000000000 --- a/samples/tasks/scenarios/nova/create-and-get-flavor.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "NovaFlavors.create_and_get_flavor": [ - { - "runner": { - "type": "constant", - "concurrency": 2, - "times": 10 - }, - "args": { - "ram": 500, - "vcpus" : 1, - 
"disk": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/create-and-get-flavor.yaml b/samples/tasks/scenarios/nova/create-and-get-flavor.yaml deleted file mode 100644 index b31cec2b3d..0000000000 --- a/samples/tasks/scenarios/nova/create-and-get-flavor.yaml +++ /dev/null @@ -1,19 +0,0 @@ - ---- - NovaFlavors.create_and_get_flavor: - - - runner: - type: "constant" - concurrency: 2 - times: 10 - args: - ram: 500 - vcpus : 1 - disk: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/create-and-get-keypair.json b/samples/tasks/scenarios/nova/create-and-get-keypair.json deleted file mode 100644 index 0166998c00..0000000000 --- a/samples/tasks/scenarios/nova/create-and-get-keypair.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "NovaKeypair.create_and_get_keypair": [ - { - "args": {}, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - - } - ] -} diff --git a/samples/tasks/scenarios/nova/create-and-get-keypair.yaml b/samples/tasks/scenarios/nova/create-and-get-keypair.yaml deleted file mode 100644 index 22827d36ac..0000000000 --- a/samples/tasks/scenarios/nova/create-and-get-keypair.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- - NovaKeypair.create_and_get_keypair: - - - args: {} - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/create-and-get-server-group.json b/samples/tasks/scenarios/nova/create-and-get-server-group.json deleted file mode 100644 index b0b9faf7a1..0000000000 --- a/samples/tasks/scenarios/nova/create-and-get-server-group.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "NovaServerGroups.create_and_get_server_group": [ - { - "args": { - "policies": [ - "affinity" - ] - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/create-and-get-server-group.yaml b/samples/tasks/scenarios/nova/create-and-get-server-group.yaml deleted file mode 100644 index 02dee1fcae..0000000000 --- a/samples/tasks/scenarios/nova/create-and-get-server-group.yaml +++ /dev/null @@ -1,15 +0,0 @@ - NovaServerGroups.create_and_get_server_group: - - - args: - policies: ["affinity"] - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/create-and-list-aggregates.json b/samples/tasks/scenarios/nova/create-and-list-aggregates.json deleted file mode 100644 index 7e976223fd..0000000000 --- a/samples/tasks/scenarios/nova/create-and-list-aggregates.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "NovaAggregates.create_and_list_aggregates": [ - { - "args": { - "availability_zone": "nova" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/create-and-list-aggregates.yaml 
b/samples/tasks/scenarios/nova/create-and-list-aggregates.yaml deleted file mode 100644 index 6ccd85ec83..0000000000 --- a/samples/tasks/scenarios/nova/create-and-list-aggregates.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - NovaAggregates.create_and_list_aggregates: - - - args: - availability_zone: "nova" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/create-and-list-flavor-access.json b/samples/tasks/scenarios/nova/create-and-list-flavor-access.json deleted file mode 100644 index 9c9ee5f2d2..0000000000 --- a/samples/tasks/scenarios/nova/create-and-list-flavor-access.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "NovaFlavors.create_and_list_flavor_access": [ - { - "runner": { - "type": "constant", - "concurrency": 2, - "times": 10 - }, - "args": { - "ram": 500, - "vcpus" : 1, - "disk": 1 - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/create-and-list-flavor-access.yaml b/samples/tasks/scenarios/nova/create-and-list-flavor-access.yaml deleted file mode 100644 index 18a8e84c25..0000000000 --- a/samples/tasks/scenarios/nova/create-and-list-flavor-access.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - NovaFlavors.create_and_list_flavor_access: - - - args: - ram: 500 - vcpus: 1 - disk: 1 - runner: - type: "constant" - times: 10 - concurrency: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/create-and-list-keypairs.json b/samples/tasks/scenarios/nova/create-and-list-keypairs.json deleted file mode 100644 index 866238c8b9..0000000000 --- a/samples/tasks/scenarios/nova/create-and-list-keypairs.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "NovaKeypair.create_and_list_keypairs": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/create-and-list-keypairs.yaml b/samples/tasks/scenarios/nova/create-and-list-keypairs.yaml deleted file mode 100644 index 75cddc9710..0000000000 --- a/samples/tasks/scenarios/nova/create-and-list-keypairs.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - NovaKeypair.create_and_list_keypairs: - - - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/create-and-list-server-groups.json b/samples/tasks/scenarios/nova/create-and-list-server-groups.json deleted file mode 100644 index 545c33cb99..0000000000 --- a/samples/tasks/scenarios/nova/create-and-list-server-groups.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "NovaServerGroups.create_and_list_server_groups": [ - { - "args": { - "policies": [ - "affinity" - ], - "all_projects": false - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/create-and-list-server-groups.yaml b/samples/tasks/scenarios/nova/create-and-list-server-groups.yaml deleted file mode 100644 index 247bab0743..0000000000 --- a/samples/tasks/scenarios/nova/create-and-list-server-groups.yaml +++ /dev/null @@ -1,16 +0,0 @@ - NovaServerGroups.create_and_list_server_groups: - - - args: - policies: ["affinity"] - all_projects: 
false - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/create-and-update-aggregate.json b/samples/tasks/scenarios/nova/create-and-update-aggregate.json deleted file mode 100644 index 65b0c46f2e..0000000000 --- a/samples/tasks/scenarios/nova/create-and-update-aggregate.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "NovaAggregates.create_and_update_aggregate": [ - { - "args": { - "availability_zone": "nova" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/create-and-update-aggregate.yaml b/samples/tasks/scenarios/nova/create-and-update-aggregate.yaml deleted file mode 100644 index 5fcf5c1aa8..0000000000 --- a/samples/tasks/scenarios/nova/create-and-update-aggregate.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - NovaAggregates.create_and_update_aggregate: - - - args: - availability_zone: "nova" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/create-flavor-and-add-tenant-access.json b/samples/tasks/scenarios/nova/create-flavor-and-add-tenant-access.json deleted file mode 100644 index 51435a542f..0000000000 --- a/samples/tasks/scenarios/nova/create-flavor-and-add-tenant-access.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "NovaFlavors.create_flavor_and_add_tenant_access": [ - { - "runner": { - "type": "constant", - "concurrency": 2, - "times": 10 - }, - "args": { - "ram": 500, - "vcpus" : 1, - "disk": 1 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/create-flavor-and-add-tenant-access.yaml b/samples/tasks/scenarios/nova/create-flavor-and-add-tenant-access.yaml deleted file mode 100644 index b40a42a09d..0000000000 --- a/samples/tasks/scenarios/nova/create-flavor-and-add-tenant-access.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - NovaFlavors.create_flavor_and_add_tenant_access: - - - runner: - type: "constant" - concurrency: 2 - times: 10 - args: - ram: 500 - vcpus : 1 - disk: 1 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 - diff --git a/samples/tasks/scenarios/nova/create-flavor-and-set-keys.json b/samples/tasks/scenarios/nova/create-flavor-and-set-keys.json deleted file mode 100644 index e150a2967c..0000000000 --- a/samples/tasks/scenarios/nova/create-flavor-and-set-keys.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "NovaFlavors.create_flavor_and_set_keys": [ - { - "runner": { - "type": "constant", - "concurrency": 2, - "times": 10 - }, - "args": { - "ram": 500, - "vcpus" : 1, - "disk": 1, - "extra_specs": { - "quota:disk_read_bytes_sec": 10240 - } - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/create-flavor-and-set-keys.yaml b/samples/tasks/scenarios/nova/create-flavor-and-set-keys.yaml deleted file mode 100644 index c376cc00a6..0000000000 --- a/samples/tasks/scenarios/nova/create-flavor-and-set-keys.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - NovaFlavors.create_flavor_and_set_keys: - - - runner: - type: "constant" - 
concurrency: 2 - times: 10 - args: - ram: 500 - vcpus : 1 - disk: 1 - extra_specs: - quota:disk_read_bytes_sec: 10240 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/create-flavor.json b/samples/tasks/scenarios/nova/create-flavor.json deleted file mode 100644 index 066e007384..0000000000 --- a/samples/tasks/scenarios/nova/create-flavor.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "NovaFlavors.create_flavor": [ - { - "runner": { - "type": "constant", - "concurrency": 2, - "times": 10 - }, - "args": { - "ram": 500, - "vcpus" : 1, - "disk": 1 - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/create-flavor.yaml b/samples/tasks/scenarios/nova/create-flavor.yaml deleted file mode 100644 index 9ffd3e7b43..0000000000 --- a/samples/tasks/scenarios/nova/create-flavor.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - NovaFlavors.create_flavor: - - - args: - ram: 500 - vcpus: 1 - disk: 1 - runner: - type: "constant" - times: 10 - concurrency: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/list-agents.json b/samples/tasks/scenarios/nova/list-agents.json deleted file mode 100644 index 29417ab33a..0000000000 --- a/samples/tasks/scenarios/nova/list-agents.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "NovaAgents.list_agents": [ - { - "runner": { - "type": "constant", - "concurrency": 2, - "times": 10 - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/list-agents.yaml b/samples/tasks/scenarios/nova/list-agents.yaml deleted file mode 100644 index 535f868814..0000000000 --- a/samples/tasks/scenarios/nova/list-agents.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- - NovaAgents.list_agents: - - - runner: - type: "constant" - concurrency: 2 - times: 10 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/list-aggregates.json b/samples/tasks/scenarios/nova/list-aggregates.json deleted file mode 100644 index 41966cbb84..0000000000 --- a/samples/tasks/scenarios/nova/list-aggregates.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "NovaAggregates.list_aggregates": [ - { - "runner": { - "type": "constant", - "concurrency": 2, - "times": 10 - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/list-aggregates.yaml b/samples/tasks/scenarios/nova/list-aggregates.yaml deleted file mode 100644 index b070af5d3f..0000000000 --- a/samples/tasks/scenarios/nova/list-aggregates.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- - NovaAggregates.list_aggregates: - - - runner: - type: "constant" - concurrency: 2 - times: 10 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/list-and-get-hypervisors.json b/samples/tasks/scenarios/nova/list-and-get-hypervisors.json deleted file mode 100644 index 9deb4000f9..0000000000 --- a/samples/tasks/scenarios/nova/list-and-get-hypervisors.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "NovaHypervisors.list_and_get_hypervisors": [ - { - "args": { - "detailed": true - }, - "runner": { - "type": "constant", - "concurrency": 2, - "times": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/list-and-get-hypervisors.yaml b/samples/tasks/scenarios/nova/list-and-get-hypervisors.yaml deleted file mode 100644 index 40497f12fa..0000000000 --- 
a/samples/tasks/scenarios/nova/list-and-get-hypervisors.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - NovaHypervisors.list_and_get_hypervisors: - - - args: - detailed: True - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/list-and-get-uptime-hypervisors.json b/samples/tasks/scenarios/nova/list-and-get-uptime-hypervisors.json deleted file mode 100644 index 0f5396ebc0..0000000000 --- a/samples/tasks/scenarios/nova/list-and-get-uptime-hypervisors.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "NovaHypervisors.list_and_get_uptime_hypervisors": [ - { - "args": { - "detailed": true - }, - "runner": { - "type": "constant", - "concurrency": 2, - "times": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/list-and-get-uptime-hypervisors.yaml b/samples/tasks/scenarios/nova/list-and-get-uptime-hypervisors.yaml deleted file mode 100644 index 51e0a2f3e0..0000000000 --- a/samples/tasks/scenarios/nova/list-and-get-uptime-hypervisors.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - NovaHypervisors.list_and_get_uptime_hypervisors: - - - args: - detailed: True - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/list-and-search-hypervisor.json b/samples/tasks/scenarios/nova/list-and-search-hypervisor.json deleted file mode 100644 index 17e2f26386..0000000000 --- a/samples/tasks/scenarios/nova/list-and-search-hypervisor.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "NovaHypervisors.list_and_search_hypervisors": [ - { - "args": { - "detailed": true - }, - "runner": { - "type": "constant", - "concurrency": 2, - "times": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/list-and-search-hypervisor.yaml b/samples/tasks/scenarios/nova/list-and-search-hypervisor.yaml deleted file mode 100644 index d4fdb45f0f..0000000000 --- a/samples/tasks/scenarios/nova/list-and-search-hypervisor.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - NovaHypervisors.list_and_search_hypervisors: - - - args: - detailed: True - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/list-availability-zones.json b/samples/tasks/scenarios/nova/list-availability-zones.json deleted file mode 100644 index c98ea220e8..0000000000 --- a/samples/tasks/scenarios/nova/list-availability-zones.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "NovaAvailabilityZones.list_availability_zones": [ - { - "runner": { - "type": "constant", - "concurrency": 2, - "times": 10 - }, - "args": { - "detailed": true - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/list-availability-zones.yaml b/samples/tasks/scenarios/nova/list-availability-zones.yaml deleted file mode 100644 index a55c5e704a..0000000000 --- a/samples/tasks/scenarios/nova/list-availability-zones.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- - NovaAvailabilityZones.list_availability_zones: - - - args: - detailed: true - runner: - type: "constant" - concurrency: 2 - times: 10 - sla: - failure_rate: - max: 0 diff --git 
a/samples/tasks/scenarios/nova/list-flavors.json b/samples/tasks/scenarios/nova/list-flavors.json deleted file mode 100644 index 9ea5b8eeb3..0000000000 --- a/samples/tasks/scenarios/nova/list-flavors.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "NovaFlavors.list_flavors": [ - { - "runner": { - "type": "constant", - "concurrency": 2, - "times": 10 - }, - "args": { - "detailed": true - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/list-flavors.yaml b/samples/tasks/scenarios/nova/list-flavors.yaml deleted file mode 100644 index f0dfd20d74..0000000000 --- a/samples/tasks/scenarios/nova/list-flavors.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - NovaFlavors.list_flavors: - - - args: - detailed: True - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/list-hypervisors.json b/samples/tasks/scenarios/nova/list-hypervisors.json deleted file mode 100644 index ad593a6c05..0000000000 --- a/samples/tasks/scenarios/nova/list-hypervisors.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "NovaHypervisors.list_hypervisors": [ - { - "runner": { - "type": "constant", - "concurrency": 2, - "times": 10 - }, - "args": { - "detailed": true - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/list-hypervisors.yaml b/samples/tasks/scenarios/nova/list-hypervisors.yaml deleted file mode 100644 index 28b8891698..0000000000 --- a/samples/tasks/scenarios/nova/list-hypervisors.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- - NovaHypervisors.list_hypervisors: - - - args: - detailed: True - runner: - type: "constant" - times: 10 - concurrency: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/list-images.json b/samples/tasks/scenarios/nova/list-images.json deleted file mode 100644 index 5401a2d796..0000000000 --- a/samples/tasks/scenarios/nova/list-images.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "NovaImages.list_images": [ - { - "runner": { - "type": "constant", - "concurrency": 2, - "times": 10 - }, - "args": { - "detailed": true - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/list-images.yaml b/samples/tasks/scenarios/nova/list-images.yaml deleted file mode 100644 index f77ac5a9f2..0000000000 --- a/samples/tasks/scenarios/nova/list-images.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - NovaImages.list_images: - - - args: - detailed: True - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/list-servers.json b/samples/tasks/scenarios/nova/list-servers.json deleted file mode 100644 index 9d20aa6a24..0000000000 --- a/samples/tasks/scenarios/nova/list-servers.json +++ /dev/null @@ -1,35 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.list_servers": [ - { - "args": { - "detailed": true - }, - "runner": { - "type": "constant", - "times": 1, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "servers": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "servers_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - 
"max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/list-servers.yaml b/samples/tasks/scenarios/nova/list-servers.yaml deleted file mode 100644 index 18a6841acb..0000000000 --- a/samples/tasks/scenarios/nova/list-servers.yaml +++ /dev/null @@ -1,23 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.list_servers: - - - args: - detailed: True - runner: - type: "constant" - times: 1 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - servers: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - servers_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/list-services.json b/samples/tasks/scenarios/nova/list-services.json deleted file mode 100644 index 9344bddbe6..0000000000 --- a/samples/tasks/scenarios/nova/list-services.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "NovaServices.list_services": [ - { - "runner": { - "type": "constant", - "concurrency": 2, - "times": 10 - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/list-services.yaml b/samples/tasks/scenarios/nova/list-services.yaml deleted file mode 100644 index f1520d1684..0000000000 --- a/samples/tasks/scenarios/nova/list-services.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- - NovaServices.list_services: - - - runner: - type: "constant" - times: 10 - concurrency: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/pause-and-unpause.json b/samples/tasks/scenarios/nova/pause-and-unpause.json deleted file mode 100644 index 6bbf73e9ce..0000000000 --- a/samples/tasks/scenarios/nova/pause-and-unpause.json +++ /dev/null @@ -1,32 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.pause_and_unpause_server": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "force_delete": false - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/pause-and-unpause.yaml b/samples/tasks/scenarios/nova/pause-and-unpause.yaml deleted file mode 100644 index fafde61b4f..0000000000 --- a/samples/tasks/scenarios/nova/pause-and-unpause.yaml +++ /dev/null @@ -1,21 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.pause_and_unpause_server: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - force_delete: false - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/resize-server.json b/samples/tasks/scenarios/nova/resize-server.json deleted file mode 100644 index a967c7a549..0000000000 --- a/samples/tasks/scenarios/nova/resize-server.json +++ /dev/null @@ -1,37 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.resize_server": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "to_flavor": { - "name": "m1.small" - }, - "confirm": true, - "force_delete": false - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 5 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} - diff --git 
a/samples/tasks/scenarios/nova/resize-server.yaml b/samples/tasks/scenarios/nova/resize-server.yaml deleted file mode 100644 index 9dcf309d2a..0000000000 --- a/samples/tasks/scenarios/nova/resize-server.yaml +++ /dev/null @@ -1,24 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.resize_server: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - to_flavor: - name: "m1.small" - confirm: true - force_delete: false - runner: - type: "constant" - times: 10 - concurrency: 5 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/resize-shutoff-server.json b/samples/tasks/scenarios/nova/resize-shutoff-server.json deleted file mode 100644 index 334c26eb4a..0000000000 --- a/samples/tasks/scenarios/nova/resize-shutoff-server.json +++ /dev/null @@ -1,37 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.resize_shutoff_server": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "to_flavor": { - "name": "m1.small" - }, - "confirm": true, - "force_delete": false - }, - "runner": { - "type": "constant", - "times": 5, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} - diff --git a/samples/tasks/scenarios/nova/resize-shutoff-server.yaml b/samples/tasks/scenarios/nova/resize-shutoff-server.yaml deleted file mode 100644 index 759b2f2271..0000000000 --- a/samples/tasks/scenarios/nova/resize-shutoff-server.yaml +++ /dev/null @@ -1,24 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.resize_shutoff_server: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - to_flavor: - name: "m1.small" - confirm: true - force_delete: false - runner: - type: "constant" - times: 5 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/shelve-and-unshelve.json b/samples/tasks/scenarios/nova/shelve-and-unshelve.json deleted file mode 100644 index 452724f691..0000000000 --- a/samples/tasks/scenarios/nova/shelve-and-unshelve.json +++ /dev/null @@ -1,32 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.shelve_and_unshelve_server": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "force_delete": false - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/shelve-and-unshelve.yaml b/samples/tasks/scenarios/nova/shelve-and-unshelve.yaml deleted file mode 100644 index 48c9aba363..0000000000 --- a/samples/tasks/scenarios/nova/shelve-and-unshelve.yaml +++ /dev/null @@ -1,21 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.shelve_and_unshelve_server: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - force_delete: false - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/statistics-hypervisors.json b/samples/tasks/scenarios/nova/statistics-hypervisors.json deleted file mode 100644 
index f04dad6278..0000000000 --- a/samples/tasks/scenarios/nova/statistics-hypervisors.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "NovaHypervisors.statistics_hypervisors": [ - { - "args": {}, - "runner": { - "type": "constant", - "concurrency": 2, - "times": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/statistics-hypervisors.yaml b/samples/tasks/scenarios/nova/statistics-hypervisors.yaml deleted file mode 100644 index e3939a81f6..0000000000 --- a/samples/tasks/scenarios/nova/statistics-hypervisors.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- - NovaHypervisors.statistics_hypervisors: - - - args: {} - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/nova/suspend-and-resume.json b/samples/tasks/scenarios/nova/suspend-and-resume.json deleted file mode 100644 index 61545f2eaa..0000000000 --- a/samples/tasks/scenarios/nova/suspend-and-resume.json +++ /dev/null @@ -1,32 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "NovaServers.suspend_and_resume_server": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "force_delete": false - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/nova/suspend-and-resume.yaml b/samples/tasks/scenarios/nova/suspend-and-resume.yaml deleted file mode 100644 index 597449f2d0..0000000000 --- a/samples/tasks/scenarios/nova/suspend-and-resume.yaml +++ /dev/null @@ -1,21 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - NovaServers.suspend_and_resume_server: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - force_delete: false - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/quotas/cinder-get.json b/samples/tasks/scenarios/quotas/cinder-get.json deleted file mode 100644 index 627a0aa470..0000000000 --- a/samples/tasks/scenarios/quotas/cinder-get.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "Quotas.cinder_get": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - - } - ] -} diff --git a/samples/tasks/scenarios/quotas/cinder-get.yaml b/samples/tasks/scenarios/quotas/cinder-get.yaml deleted file mode 100644 index 0c6bcd22c1..0000000000 --- a/samples/tasks/scenarios/quotas/cinder-get.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - Quotas.cinder_get: - - - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/quotas/cinder-update-and-delete.json b/samples/tasks/scenarios/quotas/cinder-update-and-delete.json deleted file mode 100644 index ba4194f996..0000000000 --- a/samples/tasks/scenarios/quotas/cinder-update-and-delete.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "Quotas.cinder_update_and_delete": [ - { - "args": { - "max_quota": 1024 - }, - "runner": { - "type": "constant", - "times": 10, - 
"concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/quotas/cinder-update-and-delete.yaml b/samples/tasks/scenarios/quotas/cinder-update-and-delete.yaml deleted file mode 100644 index 7d625fe5de..0000000000 --- a/samples/tasks/scenarios/quotas/cinder-update-and-delete.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - Quotas.cinder_update_and_delete: - - - args: - max_quota: 1024 - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/quotas/cinder-update.json b/samples/tasks/scenarios/quotas/cinder-update.json deleted file mode 100644 index a3c47a241b..0000000000 --- a/samples/tasks/scenarios/quotas/cinder-update.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "Quotas.cinder_update": [ - { - "args": { - "max_quota": 1024 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/quotas/cinder-update.yaml b/samples/tasks/scenarios/quotas/cinder-update.yaml deleted file mode 100644 index cb1c7e7e0a..0000000000 --- a/samples/tasks/scenarios/quotas/cinder-update.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - Quotas.cinder_update: - - - args: - max_quota: 1024 - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/quotas/neutron-update.json b/samples/tasks/scenarios/quotas/neutron-update.json deleted file mode 100644 index 3308961ab6..0000000000 --- a/samples/tasks/scenarios/quotas/neutron-update.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "Quotas.neutron_update": [ - { - "args": { - "max_quota": 1024 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/quotas/neutron-update.yaml b/samples/tasks/scenarios/quotas/neutron-update.yaml deleted file mode 100644 index 885d711523..0000000000 --- a/samples/tasks/scenarios/quotas/neutron-update.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - Quotas.neutron_update: - - - args: - max_quota: 1024 - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/quotas/nova-get.json b/samples/tasks/scenarios/quotas/nova-get.json deleted file mode 100644 index 86d99fbd1b..0000000000 --- a/samples/tasks/scenarios/quotas/nova-get.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "Quotas.nova_get": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - - } - ] -} diff --git a/samples/tasks/scenarios/quotas/nova-get.yaml b/samples/tasks/scenarios/quotas/nova-get.yaml deleted file mode 100644 index 15602ba42f..0000000000 --- a/samples/tasks/scenarios/quotas/nova-get.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - Quotas.nova_get: - - - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - sla: - 
failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/quotas/nova-update-and-delete.json b/samples/tasks/scenarios/quotas/nova-update-and-delete.json deleted file mode 100644 index 31a060bd7c..0000000000 --- a/samples/tasks/scenarios/quotas/nova-update-and-delete.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "Quotas.nova_update_and_delete": [ - { - "args": { - "max_quota": 1024 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/quotas/nova-update-and-delete.yaml b/samples/tasks/scenarios/quotas/nova-update-and-delete.yaml deleted file mode 100644 index 52e6fd0914..0000000000 --- a/samples/tasks/scenarios/quotas/nova-update-and-delete.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - Quotas.nova_update_and_delete: - - - args: - max_quota: 1024 - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/quotas/nova-update.json b/samples/tasks/scenarios/quotas/nova-update.json deleted file mode 100644 index 3eb1bb58af..0000000000 --- a/samples/tasks/scenarios/quotas/nova-update.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "Quotas.nova_update": [ - { - "args": { - "max_quota": 1024 - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/quotas/nova-update.yaml b/samples/tasks/scenarios/quotas/nova-update.yaml deleted file mode 100644 index 5c16db389d..0000000000 --- a/samples/tasks/scenarios/quotas/nova-update.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - Quotas.nova_update: - - - args: - max_quota: 1024 - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/sahara/create-and-delete-cluster.json b/samples/tasks/scenarios/sahara/create-and-delete-cluster.json deleted file mode 100644 index b77afbce28..0000000000 --- a/samples/tasks/scenarios/sahara/create-and-delete-cluster.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "SaharaClusters.create_and_delete_cluster": [ - { - "args": { - "master_flavor": { - "name": "m1.large" - }, - "worker_flavor": { - "name": "m1.medium" - }, - "workers_count": 3, - "plugin_name": "vanilla", - "hadoop_version": "2.3.0", - "auto_security_group": true - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "sahara_image": { - "image_url": "http://sahara-files.mirantis.com/sahara-icehouse-vanilla-2.3.0-ubuntu-13.10.qcow2", - "username": "ubuntu", - "plugin_name": "vanilla", - "hadoop_version": "2.3.0" - }, - "network": {} - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/sahara/create-and-delete-cluster.yaml b/samples/tasks/scenarios/sahara/create-and-delete-cluster.yaml deleted file mode 100644 index 1025791437..0000000000 --- a/samples/tasks/scenarios/sahara/create-and-delete-cluster.yaml +++ /dev/null @@ -1,29 +0,0 @@ ---- - SaharaClusters.create_and_delete_cluster: - - - args: - master_flavor: - name: "m1.large" - worker_flavor: - name: "m1.medium" - workers_count: 3 - plugin_name: "vanilla" - 
hadoop_version: "2.3.0" - auto_security_group: True - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sahara_image: - image_url: "http://sahara-files.mirantis.com/sahara-icehouse-vanilla-2.3.0-ubuntu-13.10.qcow2" - username: "ubuntu" - plugin_name: "vanilla" - hadoop_version: "2.3.0" - network: {} - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/sahara/create-and-list-node-group-templates.json b/samples/tasks/scenarios/sahara/create-and-list-node-group-templates.json deleted file mode 100644 index d93d279f75..0000000000 --- a/samples/tasks/scenarios/sahara/create-and-list-node-group-templates.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "SaharaNodeGroupTemplates.create_and_list_node_group_templates": [ - { - "args": { - "flavor": { - "name": "m1.small" - } - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/sahara/create-and-list-node-group-templates.yaml b/samples/tasks/scenarios/sahara/create-and-list-node-group-templates.yaml deleted file mode 100644 index daf8d1a6e8..0000000000 --- a/samples/tasks/scenarios/sahara/create-and-list-node-group-templates.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - SaharaNodeGroupTemplates.create_and_list_node_group_templates: - - - args: - flavor: - name: "m1.small" - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/sahara/create-delete-node-group-templates.json b/samples/tasks/scenarios/sahara/create-delete-node-group-templates.json deleted file mode 100644 index 8dd6c5e50e..0000000000 --- a/samples/tasks/scenarios/sahara/create-delete-node-group-templates.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "SaharaNodeGroupTemplates.create_delete_node_group_templates": [ - { - "args": { - "flavor": { - "name": "m1.small" - } - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/sahara/create-delete-node-group-templates.yaml b/samples/tasks/scenarios/sahara/create-delete-node-group-templates.yaml deleted file mode 100644 index 43a0187ce2..0000000000 --- a/samples/tasks/scenarios/sahara/create-delete-node-group-templates.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - SaharaNodeGroupTemplates.create_delete_node_group_templates: - - - args: - flavor: - name: "m1.small" - runner: - type: "constant" - times: 100 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/sahara/create-scale-delete-cluster.json b/samples/tasks/scenarios/sahara/create-scale-delete-cluster.json deleted file mode 100644 index aaa5432417..0000000000 --- a/samples/tasks/scenarios/sahara/create-scale-delete-cluster.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "SaharaClusters.create_scale_delete_cluster": [ - { - "args": { - "master_flavor": { - "name": "m1.large" - }, - "worker_flavor": { - "name": "m1.medium" - }, - "workers_count": 3, - "deltas": [1, -1, 1, -1], - "plugin_name": "vanilla", - "hadoop_version": "2.3.0", - "auto_security_group": true - }, - "runner": { - "type": "constant", - "times": 4, - 
"concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "sahara_image": { - "image_url": "http://sahara-files.mirantis.com/sahara-icehouse-vanilla-2.3.0-ubuntu-13.10.qcow2", - "username": "ubuntu", - "plugin_name": "vanilla", - "hadoop_version": "2.3.0" - }, - "network": {} - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/sahara/create-scale-delete-cluster.yaml b/samples/tasks/scenarios/sahara/create-scale-delete-cluster.yaml deleted file mode 100644 index 66ee796bed..0000000000 --- a/samples/tasks/scenarios/sahara/create-scale-delete-cluster.yaml +++ /dev/null @@ -1,34 +0,0 @@ ---- - SaharaClusters.create_scale_delete_cluster: - - - args: - master_flavor: - name: "m1.large" - worker_flavor: - name: "m1.medium" - workers_count: 3 - deltas: - - 1 - - -1 - - 1 - - -1 - plugin_name: "vanilla" - hadoop_version: "2.3.0" - auto_security_group: True - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sahara_image: - image_url: "http://sahara-files.mirantis.com/sahara-icehouse-vanilla-2.3.0-ubuntu-13.10.qcow2" - username: "ubuntu" - plugin_name: "vanilla" - hadoop_version: "2.3.0" - network: {} - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/sahara/jobs/dfsio-job-sequence-scaling.json b/samples/tasks/scenarios/sahara/jobs/dfsio-job-sequence-scaling.json deleted file mode 100644 index 74ec7c6095..0000000000 --- a/samples/tasks/scenarios/sahara/jobs/dfsio-job-sequence-scaling.json +++ /dev/null @@ -1,66 +0,0 @@ -{ - "SaharaJob.create_launch_job_sequence_with_scaling": [ - { - "args": { - "jobs": [ - { - "job_type": "Java", - "configs": { - "configs": { - "edp.java.main_class": "org.apache.hadoop.fs.TestDFSIO" - }, - "args": ["-write", "-nrFiles", "10", "-fileSize", "100"] - } - }, { - "job_type": "Java", - "configs": { - "configs": { - "edp.java.main_class": "org.apache.hadoop.fs.TestDFSIO" - }, - "args": ["-read", "-nrFiles", "10", "-fileSize", "100"] - } - } - ], - "deltas": [2, 2, 2] - }, - "runner": { - "type": "serial", - "times": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "sahara_image": { - "image_url": "http://sahara-files.mirantis.com/mos70/sahara-kilo-vanilla-2.6.0-ubuntu-14.04.qcow2", - "username": "ubuntu", - "plugin_name": "vanilla", - "hadoop_version": "2.6.0" - }, - "sahara_job_binaries": { - "libs": [ - { - "name": "tests.jar", - "download_url": "http://repo1.maven.org/maven2/org/apache/hadoop/hadoop-hdfs/2.6.0/hadoop-hdfs-2.6.0-tests.jar" - } - ] - }, - "sahara_cluster": { - "master_flavor_id": "4", - "worker_flavor_id": "3", - "workers_count": 3, - "plugin_name": "vanilla", - "hadoop_version": "2.6.0", - "auto_security_group": true - }, - "network": {} - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/sahara/jobs/dfsio-job-sequence-scaling.yaml b/samples/tasks/scenarios/sahara/jobs/dfsio-job-sequence-scaling.yaml deleted file mode 100644 index 849432841e..0000000000 --- a/samples/tasks/scenarios/sahara/jobs/dfsio-job-sequence-scaling.yaml +++ /dev/null @@ -1,59 +0,0 @@ ---- - SaharaJob.create_launch_job_sequence_with_scaling: - - - args: - jobs: - - - job_type: "Java" - configs: - configs: - edp.java.main_class: "org.apache.hadoop.fs.TestDFSIO" - args: - - "-write" - - "-nrFiles" - - "10" - - "-fileSize" - - "100" - - - job_type: "Java" - configs: - configs: - edp.java.main_class: "org.apache.hadoop.fs.TestDFSIO" - 
args: - - "-read" - - "-nrFiles" - - "10" - - "-fileSize" - - "100" - deltas: - - 2 - - 2 - - 2 - runner: - type: "serial" - times: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - sahara_image: - image_url: "http://sahara-files.mirantis.com/mos70/sahara-kilo-vanilla-2.6.0-ubuntu-14.04.qcow2" - username: "ubuntu" - plugin_name: "vanilla" - hadoop_version: "2.6.0" - sahara_job_binaries: - libs: - - - name: "tests.jar" - download_url: "http://repo1.maven.org/maven2/org/apache/hadoop/hadoop-hdfs/2.6.0/hadoop-hdfs-2.6.0-tests.jar" - sahara_cluster: - master_flavor_id: "4" - worker_flavor_id: "3" - workers_count: 3 - plugin_name: "vanilla" - hadoop_version: "2.6.0" - auto_security_group: True - network: {} - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/sahara/jobs/dfsio-job-sequence.json b/samples/tasks/scenarios/sahara/jobs/dfsio-job-sequence.json deleted file mode 100644 index 6b5b74925d..0000000000 --- a/samples/tasks/scenarios/sahara/jobs/dfsio-job-sequence.json +++ /dev/null @@ -1,66 +0,0 @@ -{ - "SaharaJob.create_launch_job_sequence": [ - { - "args": { - "jobs": [ - { - "job_type": "Java", - "configs": { - "configs": { - "edp.java.main_class": "org.apache.hadoop.fs.TestDFSIO" - }, - "args": ["-write", "-nrFiles", "10", "-fileSize", "100"] - } - }, { - "job_type": "Java", - "configs": { - "configs": { - "edp.java.main_class": "org.apache.hadoop.fs.TestDFSIO" - }, - "args": ["-read", "-nrFiles", "10", "-fileSize", "100"] - } - } - ] - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "sahara_image": { - "image_url": "http://sahara-files.mirantis.com/mos70/sahara-kilo-vanilla-2.6.0-ubuntu-14.04.qcow2", - "username": "ubuntu", - "plugin_name": "vanilla", - "hadoop_version": "2.6.0" - }, - "sahara_job_binaries": { - "libs": [ - { - "name": "tests.jar", - "download_url": "http://repo1.maven.org/maven2/org/apache/hadoop/hadoop-hdfs/2.6.0/hadoop-hdfs-2.6.0-tests.jar" - } - ] - }, - "sahara_cluster": { - "master_flavor_id": "4", - "worker_flavor_id": "3", - "workers_count": 3, - "plugin_name": "vanilla", - "hadoop_version": "2.6.0", - "auto_security_group": true - }, - "network": {} - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/sahara/jobs/dfsio-job-sequence.yaml b/samples/tasks/scenarios/sahara/jobs/dfsio-job-sequence.yaml deleted file mode 100644 index 40737f198a..0000000000 --- a/samples/tasks/scenarios/sahara/jobs/dfsio-job-sequence.yaml +++ /dev/null @@ -1,56 +0,0 @@ ---- - SaharaJob.create_launch_job_sequence: - - - args: - jobs: - - - job_type: "Java" - configs: - configs: - edp.java.main_class: "org.apache.hadoop.fs.TestDFSIO" - args: - - "-write" - - "-nrFiles" - - "10" - - "-fileSize" - - "100" - - - job_type: "Java" - configs: - configs: - edp.java.main_class: "org.apache.hadoop.fs.TestDFSIO" - args: - - "-read" - - "-nrFiles" - - "10" - - "-fileSize" - - "100" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sahara_image: - image_url: "http://sahara-files.mirantis.com/mos70/sahara-kilo-vanilla-2.6.0-ubuntu-14.04.qcow2" - username: "ubuntu" - plugin_name: "vanilla" - hadoop_version: "2.6.0" - sahara_job_binaries: - libs: - - - name: "tests.jar" - download_url: "http://repo1.maven.org/maven2/org/apache/hadoop/hadoop-hdfs/2.6.0/hadoop-hdfs-2.6.0-tests.jar" - sahara_cluster: - master_flavor_id: "4" - worker_flavor_id: "3" - workers_count: 
3 - plugin_name: "vanilla" - hadoop_version: "2.6.0" - auto_security_group: True - network: {} - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/sahara/jobs/java-action-job.json b/samples/tasks/scenarios/sahara/jobs/java-action-job.json deleted file mode 100644 index b373d9a53c..0000000000 --- a/samples/tasks/scenarios/sahara/jobs/java-action-job.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "SaharaJob.create_launch_job": [ - { - "args": { - "job_type": "Java", - "configs": { - "configs": { - "edp.java.main_class": "org.apache.hadoop.examples.PiEstimator" - }, - "args": ["10", "10"] - } - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "sahara_image": { - "image_url": "http://sahara-files.mirantis.com/mos70/sahara-kilo-vanilla-2.6.0-ubuntu-14.04.qcow2", - "username": "ubuntu", - "plugin_name": "vanilla", - "hadoop_version": "2.6.0" - }, - "sahara_job_binaries": { - "libs": [ - { - "name": "examples.jar", - "download_url": "http://repo1.maven.org/maven2/org/apache/hadoop/hadoop-mapreduce-examples/2.6.0/hadoop-mapreduce-examples-2.6.0.jar" - } - ] - }, - "sahara_cluster": { - "master_flavor_id": "4", - "worker_flavor_id": "3", - "workers_count": 3, - "plugin_name": "vanilla", - "hadoop_version": "2.6.0", - "auto_security_group": true - }, - "network": {} - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/sahara/jobs/java-action-job.yaml b/samples/tasks/scenarios/sahara/jobs/java-action-job.yaml deleted file mode 100644 index 71d4c260b7..0000000000 --- a/samples/tasks/scenarios/sahara/jobs/java-action-job.yaml +++ /dev/null @@ -1,40 +0,0 @@ ---- - SaharaJob.create_launch_job: - - - args: - job_type: "Java" - configs: - configs: - edp.java.main_class: "org.apache.hadoop.examples.PiEstimator" - args: - - "10" - - "10" - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sahara_image: - image_url: "http://sahara-files.mirantis.com/mos70/sahara-kilo-vanilla-2.6.0-ubuntu-14.04.qcow2" - username: "ubuntu" - plugin_name: "vanilla" - hadoop_version: "2.6.0" - sahara_job_binaries: - libs: - - - name: "examples.jar" - download_url: "http://repo1.maven.org/maven2/org/apache/hadoop/hadoop-mapreduce-examples/2.6.0/hadoop-mapreduce-examples-2.6.0.jar" - sahara_cluster: - master_flavor_id: "4" - worker_flavor_id: "3" - workers_count: 3 - plugin_name: "vanilla" - hadoop_version: "2.6.0" - auto_security_group: True - network: {} - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/sahara/jobs/pig-script-job.json b/samples/tasks/scenarios/sahara/jobs/pig-script-job.json deleted file mode 100644 index de34cf5cfd..0000000000 --- a/samples/tasks/scenarios/sahara/jobs/pig-script-job.json +++ /dev/null @@ -1,63 +0,0 @@ -{ - "SaharaJob.create_launch_job": [ - { - "args": { - "job_type": "Pig", - "configs": {} - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "sahara_image": { - "image_url": "http://sahara-files.mirantis.com/mos70/sahara-kilo-vanilla-2.6.0-ubuntu-14.04.qcow2", - "username": "ubuntu", - "plugin_name": "vanilla", - "hadoop_version": "2.6.0" - }, - "sahara_job_binaries": { - "mains": [ - { - "name": "example.pig", - "download_url": "https://raw.githubusercontent.com/openstack/sahara/master/etc/edp-examples/pig-job/example.pig" - } - ], - "libs": [ - { - 
"name": "udf.jar", - "download_url": "https://github.com/openstack/sahara/blob/master/etc/edp-examples/pig-job/udf.jar?raw=true" - } - ] - }, - "sahara_input_data_sources": { - "input_type": "hdfs", - "input_url": "/" - }, - "sahara_output_data_sources": { - "output_type": "hdfs", - "output_url_prefix": "/out_" - }, - "sahara_cluster": { - "master_flavor_id": "4", - "worker_flavor_id": "3", - "workers_count": 3, - "plugin_name": "vanilla", - "hadoop_version": "2.6.0", - "auto_security_group": true - }, - "network": {} - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/sahara/jobs/pig-script-job.yaml b/samples/tasks/scenarios/sahara/jobs/pig-script-job.yaml deleted file mode 100644 index dbd0d2e4a2..0000000000 --- a/samples/tasks/scenarios/sahara/jobs/pig-script-job.yaml +++ /dev/null @@ -1,45 +0,0 @@ ---- - SaharaJob.create_launch_job: - - - args: - job_type: "Pig" - configs: {} - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - sahara_image: - image_url: "http://sahara-files.mirantis.com/mos70/sahara-kilo-vanilla-2.6.0-ubuntu-14.04.qcow2" - username: "ubuntu" - plugin_name: "vanilla" - hadoop_version: "2.6.0" - sahara_job_binaries: - mains: - - - name: "example.pig" - download_url: "https://raw.githubusercontent.com/openstack/sahara/master/etc/edp-examples/pig-job/example.pig" - libs: - - - name: "udf.jar" - download_url: "https://github.com/openstack/sahara/blob/master/etc/edp-examples/pig-job/udf.jar?raw=true" - sahara_input_data_sources: - input_type: "hdfs" - input_url: "/" - sahara_output_data_sources: - output_type: "hdfs" - output_url_prefix: "/out_" - sahara_cluster: - master_flavor_id: "4" - worker_flavor_id: "3" - workers_count: 3 - plugin_name: "vanilla" - hadoop_version: "2.6.0" - auto_security_group: True - network: {} - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/senlin/create-and-delete-profile-cluster.json b/samples/tasks/scenarios/senlin/create-and-delete-profile-cluster.json deleted file mode 100644 index 844f106c8f..0000000000 --- a/samples/tasks/scenarios/senlin/create-and-delete-profile-cluster.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "SenlinClusters.create_and_delete_cluster": [ - { - "args": { - "desired_capacity": 3, - "min_size": 0, - "max_size": 5 - }, - "runner": { - "type": "constant", - "times": 3, - "concurrency": 1 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "profiles": { - "type": "os.nova.server", - "version": "1.0", - "properties": { - "name": "cirros_server", - "flavor": 1, - "image": "cirros-0.3.5-x86_64-disk", - "networks": [ - { "network": "private" } - ] - } - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/senlin/create-and-delete-profile-cluster.yaml b/samples/tasks/scenarios/senlin/create-and-delete-profile-cluster.yaml deleted file mode 100644 index 701d4e5128..0000000000 --- a/samples/tasks/scenarios/senlin/create-and-delete-profile-cluster.yaml +++ /dev/null @@ -1,27 +0,0 @@ ---- - SenlinClusters.create_and_delete_cluster: - - - args: - desired_capacity: 3 - min_size: 0 - max_size: 5 - runner: - type: "constant" - times: 3 - concurrency: 1 - context: - users: - tenants: 1 - users_per_tenant: 1 - profiles: - type: os.nova.server - version: "1.0" - properties: - name: cirros_server - flavor: 1 - image: "cirros-0.3.5-x86_64-disk" - networks: - - network: private - sla: - failure_rate: - max: 0 diff --git 
a/samples/tasks/scenarios/swift/create-container-and-object-then-delete-all.json b/samples/tasks/scenarios/swift/create-container-and-object-then-delete-all.json deleted file mode 100644 index e29e50c653..0000000000 --- a/samples/tasks/scenarios/swift/create-container-and-object-then-delete-all.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "SwiftObjects.create_container_and_object_then_delete_all": [ - { - "args": { - "objects_per_container": 5, - "object_size": 102400 - }, - "runner": { - "type": "constant", - "times": 4, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "roles": [ - "admin" - ] - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/swift/create-container-and-object-then-delete-all.yaml b/samples/tasks/scenarios/swift/create-container-and-object-then-delete-all.yaml deleted file mode 100644 index 30e3edd64f..0000000000 --- a/samples/tasks/scenarios/swift/create-container-and-object-then-delete-all.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - SwiftObjects.create_container_and_object_then_delete_all: - - - args: - objects_per_container: 5 - object_size: 102400 - runner: - type: "constant" - times: 4 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - roles: - - "admin" - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/swift/create-container-and-object-then-download-object.json b/samples/tasks/scenarios/swift/create-container-and-object-then-download-object.json deleted file mode 100644 index cfaf8e1949..0000000000 --- a/samples/tasks/scenarios/swift/create-container-and-object-then-download-object.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "SwiftObjects.create_container_and_object_then_download_object": [ - { - "args": { - "objects_per_container": 5, - "object_size": 1024 - }, - "runner": { - "type": "constant", - "times": 6, - "concurrency": 3 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "roles": [ - "admin" - ] - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/swift/create-container-and-object-then-download-object.yaml b/samples/tasks/scenarios/swift/create-container-and-object-then-download-object.yaml deleted file mode 100644 index 0f8cce79e1..0000000000 --- a/samples/tasks/scenarios/swift/create-container-and-object-then-download-object.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - SwiftObjects.create_container_and_object_then_download_object: - - - args: - objects_per_container: 5 - object_size: 1024 - runner: - type: "constant" - times: 6 - concurrency: 3 - context: - users: - tenants: 1 - users_per_tenant: 1 - roles: - - "admin" - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/swift/create-container-and-object-then-list-objects.json b/samples/tasks/scenarios/swift/create-container-and-object-then-list-objects.json deleted file mode 100644 index eed983bd27..0000000000 --- a/samples/tasks/scenarios/swift/create-container-and-object-then-list-objects.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "SwiftObjects.create_container_and_object_then_list_objects": [ - { - "args": { - "objects_per_container": 2, - "object_size": 5120 - }, - "runner": { - "type": "constant", - "times": 2, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "roles": [ - "admin" - ] - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git 
a/samples/tasks/scenarios/swift/create-container-and-object-then-list-objects.yaml b/samples/tasks/scenarios/swift/create-container-and-object-then-list-objects.yaml deleted file mode 100644 index 6987a3284d..0000000000 --- a/samples/tasks/scenarios/swift/create-container-and-object-then-list-objects.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- - SwiftObjects.create_container_and_object_then_list_objects: - - - args: - objects_per_container: 2 - object_size: 5120 - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - roles: - - "admin" - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/swift/list-and-download-objects-in-containers.json b/samples/tasks/scenarios/swift/list-and-download-objects-in-containers.json deleted file mode 100644 index 7ba4a2ed45..0000000000 --- a/samples/tasks/scenarios/swift/list-and-download-objects-in-containers.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "SwiftObjects.list_and_download_objects_in_containers": [ - { - "runner": { - "type": "constant", - "times": 2, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "roles": [ - "admin" - ], - "swift_objects": { - "containers_per_tenant": 2, - "objects_per_container": 5, - "object_size": 10240 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/swift/list-and-download-objects-in-containers.yaml b/samples/tasks/scenarios/swift/list-and-download-objects-in-containers.yaml deleted file mode 100644 index d567dae893..0000000000 --- a/samples/tasks/scenarios/swift/list-and-download-objects-in-containers.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - SwiftObjects.list_and_download_objects_in_containers: - - - runner: - type: "constant" - times: 2 - concurrency: 2 - context: - users: - tenants: 1 - users_per_tenant: 1 - roles: - - "admin" - swift_objects: - containers_per_tenant: 2 - objects_per_container: 5 - object_size: 10240 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/swift/list-objects-in-containers.json b/samples/tasks/scenarios/swift/list-objects-in-containers.json deleted file mode 100644 index 1e6d6c1258..0000000000 --- a/samples/tasks/scenarios/swift/list-objects-in-containers.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "SwiftObjects.list_objects_in_containers": [ - { - "runner": { - "type": "constant", - "times": 6, - "concurrency": 3 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "roles": [ - "admin" - ], - "swift_objects": { - "containers_per_tenant": 1, - "objects_per_container": 10, - "object_size": 1024 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/swift/list-objects-in-containers.yaml b/samples/tasks/scenarios/swift/list-objects-in-containers.yaml deleted file mode 100644 index 04cfdd4f06..0000000000 --- a/samples/tasks/scenarios/swift/list-objects-in-containers.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - SwiftObjects.list_objects_in_containers: - - - runner: - type: "constant" - times: 6 - concurrency: 3 - context: - users: - tenants: 1 - users_per_tenant: 1 - roles: - - "admin" - swift_objects: - containers_per_tenant: 1 - objects_per_container: 10 - object_size: 1024 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/vm/boot-runcommand-delete-custom-image.json b/samples/tasks/scenarios/vm/boot-runcommand-delete-custom-image.json deleted file mode 100644 index 34ecf1ce13..0000000000 --- 
a/samples/tasks/scenarios/vm/boot-runcommand-delete-custom-image.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "VMTasks.boot_runcommand_delete": [ - { - "args": { - "flavor": {"name": "m1.small"}, - "command": { - "remote_path": "./instance_test.sh" - }, - "username": "root", - "userdata": "#cloud-config\ndisable_root: 0\nssh_pwauth: 1" - }, - "runner": { - "type": "constant", - "times": 1, - "concurrency": 1, - "timeout": 3000 - }, - "context": { - "users": { - "tenants": 1, - "users_per_tenant": 1 - }, - "image_command_customizer": { - "image": {"name": "Fedora-x86_64-20-20140618-sda"}, - "flavor": {"name": "m1.small"}, - "command": { - "local_path": "rally-jobs/extra/install_benchmark.sh", - "remote_path": "./install_benchmark.sh" - }, - "username": "root", - "userdata": "#cloud-config\ndisable_root: 0\nssh_pwauth: 1" - }, - "network": {} - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/vm/boot-runcommand-delete-custom-image.yaml b/samples/tasks/scenarios/vm/boot-runcommand-delete-custom-image.yaml deleted file mode 100644 index b528ecfa7a..0000000000 --- a/samples/tasks/scenarios/vm/boot-runcommand-delete-custom-image.yaml +++ /dev/null @@ -1,33 +0,0 @@ ---- - VMTasks.boot_runcommand_delete: - - - args: - command: - remote_path: "./instance_test.sh" - flavor: - name: m1.small - userdata: "#cloud-config\ndisable_root: 0\nssh_pwauth: 1" - username: root - runner: - concurrency: 1 - timeout: 3000 - times: 1 - type: "constant" - context: - image_command_customizer: - command: - local_path: "rally-jobs/extra/install_benchmark.sh" - remote_path: "./install_benchmark.sh" - flavor: - name: m1.small - image: - name: "Fedora-x86_64-20-20140618-sda" - userdata: "#cloud-config\ndisable_root: 0\nssh_pwauth: 1" - username: root - network: {} - users: - tenants: 1 - users_per_tenant: 1 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/vm/boot-runcommand-delete-script-inline.json b/samples/tasks/scenarios/vm/boot-runcommand-delete-script-inline.json deleted file mode 100644 index 165703df21..0000000000 --- a/samples/tasks/scenarios/vm/boot-runcommand-delete-script-inline.json +++ /dev/null @@ -1,40 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "VMTasks.boot_runcommand_delete": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "floating_network": "public", - "force_delete": false, - "command": { - "interpreter": "/bin/sh", - "script_inline": "ls -la" - }, - "username": "cirros" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - }, - "network": { - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/vm/boot-runcommand-delete-script-inline.yaml b/samples/tasks/scenarios/vm/boot-runcommand-delete-script-inline.yaml deleted file mode 100644 index 837512c53a..0000000000 --- a/samples/tasks/scenarios/vm/boot-runcommand-delete-script-inline.yaml +++ /dev/null @@ -1,27 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - VMTasks.boot_runcommand_delete: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - floating_network: "public" - force_delete: false - command: - interpreter: "/bin/sh" - script_inline: "ls -la" - username: "cirros" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - network: {} - sla: - 
failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/vm/boot-runcommand-delete-with-disk.json b/samples/tasks/scenarios/vm/boot-runcommand-delete-with-disk.json deleted file mode 100644 index 25619704a5..0000000000 --- a/samples/tasks/scenarios/vm/boot-runcommand-delete-with-disk.json +++ /dev/null @@ -1,43 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "VMTasks.boot_runcommand_delete": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "volume_args": { - "size": 2 - }, - "fixed_network": "private", - "floating_network": "public", - "use_floating_ip": true, - "force_delete": false, - "command": { - "interpreter": "/bin/sh", - "script_file": "samples/tasks/support/instance_test.sh" - }, - "username": "cirros" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/vm/boot-runcommand-delete-with-disk.yaml b/samples/tasks/scenarios/vm/boot-runcommand-delete-with-disk.yaml deleted file mode 100644 index c7860236c2..0000000000 --- a/samples/tasks/scenarios/vm/boot-runcommand-delete-with-disk.yaml +++ /dev/null @@ -1,30 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - VMTasks.boot_runcommand_delete: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - volume_args: - size: 2 - fixed_network: "private" - floating_network: "public" - use_floating_ip: true - force_delete: false - command: - interpreter: "/bin/sh" - script_file: "samples/tasks/support/instance_test.sh" - username: "cirros" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/vm/boot-runcommand-delete.json b/samples/tasks/scenarios/vm/boot-runcommand-delete.json deleted file mode 100644 index 8cfd07eceb..0000000000 --- a/samples/tasks/scenarios/vm/boot-runcommand-delete.json +++ /dev/null @@ -1,40 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "VMTasks.boot_runcommand_delete": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "floating_network": "public", - "force_delete": false, - "command": { - "interpreter": "/bin/sh", - "script_file": "samples/tasks/support/instance_test.sh" - }, - "username": "cirros" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - }, - "network": { - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/vm/boot-runcommand-delete.yaml b/samples/tasks/scenarios/vm/boot-runcommand-delete.yaml deleted file mode 100644 index e4f8c3d02c..0000000000 --- a/samples/tasks/scenarios/vm/boot-runcommand-delete.yaml +++ /dev/null @@ -1,27 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - VMTasks.boot_runcommand_delete: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - floating_network: "public" - force_delete: false - command: - interpreter: "/bin/sh" - script_file: "samples/tasks/support/instance_test.sh" - username: "cirros" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - network: {} - sla: - failure_rate: - max: 0 diff --git 
a/samples/tasks/scenarios/vm/dd-load-test.json b/samples/tasks/scenarios/vm/dd-load-test.json deleted file mode 100644 index 4e2dee7349..0000000000 --- a/samples/tasks/scenarios/vm/dd-load-test.json +++ /dev/null @@ -1,37 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} -{ - "VMTasks.dd_load_test": [ - { - "args": { - "flavor": { - "name": "{{flavor_name}}" - }, - "image": { - "name": "^cirros.*-disk$" - }, - "floating_network": "public", - "force_delete": false, - "interpreter": "/bin/sh", - "username": "cirros" - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 3, - "users_per_tenant": 2 - }, - "network": { - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/vm/dd-load-test.yaml b/samples/tasks/scenarios/vm/dd-load-test.yaml deleted file mode 100644 index 43d1857205..0000000000 --- a/samples/tasks/scenarios/vm/dd-load-test.yaml +++ /dev/null @@ -1,25 +0,0 @@ -{% set flavor_name = flavor_name or "m1.tiny" %} ---- - VMTasks.dd_load_test: - - - args: - flavor: - name: "{{flavor_name}}" - image: - name: "^cirros.*-disk$" - floating_network: "public" - force_delete: false - interpreter: "/bin/sh" - username: "cirros" - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 3 - users_per_tenant: 2 - network: {} - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/watcher/create-audit-and-delete.json b/samples/tasks/scenarios/watcher/create-audit-and-delete.json deleted file mode 100644 index 78003dc792..0000000000 --- a/samples/tasks/scenarios/watcher/create-audit-and-delete.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "Watcher.create_audit_and_delete": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "context": { - "users": { - "tenants": 2, - "users_per_tenant": 2 - }, - "audit_templates": { - "audit_templates_per_admin": 5, - "fill_strategy": "round_robin", - "params": [ - { - "goal": { - "name": "dummy" - }, - "strategy": { - "name": "dummy" - } - } - ] - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/watcher/create-audit-and-delete.yaml b/samples/tasks/scenarios/watcher/create-audit-and-delete.yaml deleted file mode 100644 index a1edf8cf4a..0000000000 --- a/samples/tasks/scenarios/watcher/create-audit-and-delete.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- - Watcher.create_audit_and_delete: - - - runner: - type: "constant" - times: 10 - concurrency: 2 - context: - users: - tenants: 2 - users_per_tenant: 2 - audit_templates: - audit_templates_per_admin: 5 - fill_strategy: "round_robin" - params: - - goal: - name: "dummy" - strategy: - name: "dummy" - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/watcher/create-audit-template-and-delete.json b/samples/tasks/scenarios/watcher/create-audit-template-and-delete.json deleted file mode 100644 index c72e013fa6..0000000000 --- a/samples/tasks/scenarios/watcher/create-audit-template-and-delete.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "Watcher.create_audit_template_and_delete": [ - { - "args": { - "goal": { - "name": "dummy" - }, - "strategy": { - "name": "dummy" - } - }, - "runner": { - "type": "constant", - "times": 10, - "concurrency": 2 - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/watcher/create-audit-template-and-delete.yaml b/samples/tasks/scenarios/watcher/create-audit-template-and-delete.yaml deleted 
file mode 100644 index d9679f27a1..0000000000 --- a/samples/tasks/scenarios/watcher/create-audit-template-and-delete.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- - Watcher.create_audit_template_and_delete: - - - args: - goal: - name: "dummy" - strategy: - name: "dummy" - runner: - type: "constant" - times: 10 - concurrency: 2 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/watcher/list-audit-templates.json b/samples/tasks/scenarios/watcher/list-audit-templates.json deleted file mode 100644 index 5730c198db..0000000000 --- a/samples/tasks/scenarios/watcher/list-audit-templates.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "Watcher.list_audit_templates": [ - { - "runner": { - "type": "constant", - "times": 10, - "concurrency": 1 - }, - "context": { - "audit_templates": { - "audit_templates_per_admin": 5, - "fill_strategy": "random", - "params": [ - { - "goal": { - "name": "workload_balancing" - }, - "strategy": { - "name": "workload_stabilization" - } - }, - { - "goal": { - "name": "dummy" - }, - "strategy": { - "name": "dummy" - } - } - ] - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/watcher/list-audit-templates.yaml b/samples/tasks/scenarios/watcher/list-audit-templates.yaml deleted file mode 100644 index f0b9f1dee8..0000000000 --- a/samples/tasks/scenarios/watcher/list-audit-templates.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- - Watcher.list_audit_templates: - - - runner: - type: "constant" - times: 10 - concurrency: 1 - context: - audit_templates: - audit_templates_per_admin: 5 - fill_strategy: "random" - params: - - goal: - name: "workload_balancing" - strategy: - name: "workload_stabilization" - - goal: - name: "dummy" - strategy: - name: "dummy" - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/workload/wordpress.json b/samples/tasks/scenarios/workload/wordpress.json deleted file mode 100644 index 19ae433fa5..0000000000 --- a/samples/tasks/scenarios/workload/wordpress.json +++ /dev/null @@ -1,53 +0,0 @@ -{ - "VMTasks.runcommand_heat": [ - { - "runner": { - "type": "constant", - "concurrency": 1, - "timeout": 3000, - "times": 1 - }, - "args": { - "files": { - "wp-instances.yaml": "rally-jobs/extra/workload/wp-instances.yaml" - }, - "workload": { - "username": "fedora", - "resource": [ - "rally.plugins.workload", - "siege.py" - ] - }, - "template": "rally-jobs/extra/workload/wordpress_heat_template.yaml", - "parameters": { - "router_id": "c497caa1-9d73-402b-bcd1-cc269e9af29e", - "instance_type": "gig", - "wp_image": "fedora", - "network_id": "9d477754-e9ba-4560-9b2b-9ce9d36638ce", - "image": "fedora", - "wp_instance_type": "gig", - "wp_instances_count": 2 - } - }, - "context": { - "flavors": [ - { - "vcpus": 1, - "disk": 4, - "ram": 1024, - "name": "gig" - } - ], - "users": { - "users_per_tenant": 1, - "tenants": 1 - } - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/workload/wordpress.yaml b/samples/tasks/scenarios/workload/wordpress.yaml deleted file mode 100644 index 9ed61af441..0000000000 --- a/samples/tasks/scenarios/workload/wordpress.yaml +++ /dev/null @@ -1,38 +0,0 @@ ---- - - VMTasks.runcommand_heat: - - - args: - workload: - resource: ["rally.plugins.workload", "siege.py"] - username: "fedora" - template: rally-jobs/extra/workload/wordpress_heat_template.yaml - files: - wp-instances.yaml: rally-jobs/extra/workload/wp-instances.yaml - parameters: - wp_instances_count: 2 - wp_instance_type: gig - instance_type: gig - wp_image: fedora - 
image: fedora - network_id: 9d477754-e9ba-4560-9b2b-9ce9d36638ce - router_id: c497caa1-9d73-402b-bcd1-cc269e9af29e - - context: - users: - tenants: 1 - users_per_tenant: 1 - flavors: - - name: gig - ram: 1024 - disk: 4 - vcpus: 1 - - runner: - concurrency: 1 - timeout: 3000 - times: 1 - type: constant - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/zaqar/create-queue.json b/samples/tasks/scenarios/zaqar/create-queue.json deleted file mode 100644 index a88e5a4f61..0000000000 --- a/samples/tasks/scenarios/zaqar/create-queue.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "ZaqarBasic.create_queue": [ - { - "args": {}, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/zaqar/create-queue.yaml b/samples/tasks/scenarios/zaqar/create-queue.yaml deleted file mode 100644 index 67e5f3d198..0000000000 --- a/samples/tasks/scenarios/zaqar/create-queue.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- - ZaqarBasic.create_queue: - - - args: {} - runner: - type: "constant" - times: 100 - concurrency: 10 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/scenarios/zaqar/producer-consumer.json b/samples/tasks/scenarios/zaqar/producer-consumer.json deleted file mode 100644 index 57f51ce95c..0000000000 --- a/samples/tasks/scenarios/zaqar/producer-consumer.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "ZaqarBasic.producer_consumer": [ - { - "args": { - "min_msg_count": 50, - "max_msg_count": 200 - }, - "runner": { - "type": "constant", - "times": 100, - "concurrency": 10 - }, - "sla": { - "failure_rate": { - "max": 0 - } - } - } - ] -} diff --git a/samples/tasks/scenarios/zaqar/producer-consumer.yaml b/samples/tasks/scenarios/zaqar/producer-consumer.yaml deleted file mode 100644 index 6413049c31..0000000000 --- a/samples/tasks/scenarios/zaqar/producer-consumer.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- - ZaqarBasic.producer_consumer: - - - args: - min_msg_count: 50 - max_msg_count: 200 - runner: - type: "constant" - times: 100 - concurrency: 10 - sla: - failure_rate: - max: 0 diff --git a/samples/tasks/sla/create-and-delete-user.json b/samples/tasks/sla/dummy.json similarity index 74% rename from samples/tasks/sla/create-and-delete-user.json rename to samples/tasks/sla/dummy.json index 534a097d97..c251a75426 100644 --- a/samples/tasks/sla/create-and-delete-user.json +++ b/samples/tasks/sla/dummy.json @@ -1,11 +1,13 @@ { - "KeystoneBasic.create_delete_user": [ + "Dummy.dummy": [ { - "args": {}, + "args": { + "sleep": 5 + }, "runner": { "type": "constant", - "times": 100, - "concurrency": 10 + "times": 20, + "concurrency": 5 }, "sla": { "max_seconds_per_iteration": 4.0, diff --git a/samples/tasks/sla/create-and-delete-user.yaml b/samples/tasks/sla/dummy.yaml similarity index 72% rename from samples/tasks/sla/create-and-delete-user.yaml rename to samples/tasks/sla/dummy.yaml index e15bd8fb3b..8e7151844c 100644 --- a/samples/tasks/sla/create-and-delete-user.yaml +++ b/samples/tasks/sla/dummy.yaml @@ -1,11 +1,12 @@ --- - KeystoneBasic.create_delete_user: + Dummy.dummy: - - args: {} + args: + sleep: 5 runner: type: "constant" - times: 100 - concurrency: 10 + times: 20 + concurrency: 5 sla: max_seconds_per_iteration: 4.0 failure_rate: diff --git a/samples/tasks/support/README.rst b/samples/tasks/support/README.rst deleted file mode 100644 index 8fb83bb1a8..0000000000 --- a/samples/tasks/support/README.rst +++ /dev/null @@ -1,11 +0,0 @@ -instance_linpack.sh -=================== - 
-instance_linpack.sh, will kick off a CPU intensive workload within an OpenStack instance. -This script will return the avg gflops and max gflops Linpack reports in a JSON format. -To run this workload, the VM must have linpack installed prior to running. - -instance_test.sh -================ - -The script was absorbed by VMTasks.dd_load_test scenario. diff --git a/samples/tasks/support/instance_linpack.sh b/samples/tasks/support/instance_linpack.sh deleted file mode 100755 index 330180b362..0000000000 --- a/samples/tasks/support/instance_linpack.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/sh -# Location of Linpack binary -LINPACK='/opt/linpack/xlinpack_xeon64' -type -P $LINPACK &>/dev/null && continue || { echo "Linpack Not Found"; exit 1 } - -# Location to create linpack dat file -LINPACK_DAT='~/linpack.dat' - -NUM_CPU=`cat /proc/cpuinfo | grep processor | wc -l` -export OMP_NUM_THREADS=$NUM_CPU -echo "Sample Intel(R) LINPACK data file (from lininput_xeon64)" > ${LINPACK_DAT} -echo "Intel(R) LINPACK data" >> ${LINPACK_DAT} -echo "1 # number of tests" >> ${LINPACK_DAT} -echo "10514 # problem sizes" >> ${LINPACK_DAT} -echo "20016 # leading dimensions" >> ${LINPACK_DAT} -echo "2 # times to run a test " >> ${LINPACK_DAT} -echo "4 # alignment values (in KBytes)" >> ${LINPACK_DAT} -OUTPUT=$(${LINPACK} < ${LINPACK_DAT} | grep -A 1 Average | grep 20016) -AVERAGE=$(echo $OUTPUT | awk '{print $4}') -MAX=$(echo $OUTPUT | awk '{print $5}') - -echo "{ - \"average_gflops\": $AVERAGE, - \"max_gflops\": $MAX - }" diff --git a/samples/tasks/support/instance_test.sh b/samples/tasks/support/instance_test.sh deleted file mode 100755 index f51dd55c92..0000000000 --- a/samples/tasks/support/instance_test.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh - -echo "The script was absorbed by VMTasks.dd_load_test scenario." -# If we do not fail it, no one will found the warning message about deprecation -exit 1 diff --git a/tasks/README.rst b/tasks/README.rst deleted file mode 100644 index 6d7d186a0f..0000000000 --- a/tasks/README.rst +++ /dev/null @@ -1,6 +0,0 @@ -================================ -Rally Tasks For Production Usage -================================ - - -Detailed Instruction TBD \ No newline at end of file diff --git a/tasks/openstack/README.rst b/tasks/openstack/README.rst deleted file mode 100644 index 8382eda610..0000000000 --- a/tasks/openstack/README.rst +++ /dev/null @@ -1,50 +0,0 @@ -============================ -OpenStack Certification Task -============================ - -How To Validate & Run Task --------------------------- - -To validate task with your own parameters run: - -.. code-block:: console - - $ rally task validate task.yaml --task-args-file task_arguments.yaml - - -To start task with your own parameters run: - -.. code-block:: console - - $ rally task start task.yaml --task-args-file task_arguments.yaml - - -Task Arguments --------------- - -File task_arguments.yaml contains all task options: - -+------------------------+----------------------------------------------------+ -| Name | Description | -+========================+====================================================+ -| service_list | List of services which should be tested | -+------------------------+----------------------------------------------------+ -| smoke | Dry run without load from 1 user | -+------------------------+----------------------------------------------------+ -| use_existing_users | In case of testing cloud with r/o Keystone e.g. 
AD | -+------------------------+----------------------------------------------------+ -| image_name | Images name that exist in cloud | -+------------------------+----------------------------------------------------+ -| flavor_name | Flavor name that exist in cloud | -+------------------------+----------------------------------------------------+ -| glance_image_location | URL of image that is used to test Glance upload | -+------------------------+----------------------------------------------------+ -| users_amount | Expected amount of users | -+------------------------+----------------------------------------------------+ -| tenants_amount | Expected amount of tenants | -+------------------------+----------------------------------------------------+ -| controllers_amount | Amount of OpenStack API nodes (controllers) | -+------------------------+----------------------------------------------------+ - -All options have default values, hoverer user should change them to reflect -configuration and size of tested OpenStack cloud. diff --git a/tasks/openstack/macro/macro.yaml b/tasks/openstack/macro/macro.yaml deleted file mode 100644 index 9d9a7700ba..0000000000 --- a/tasks/openstack/macro/macro.yaml +++ /dev/null @@ -1,96 +0,0 @@ -{%- macro user_context(tenants,users_per_tenant, use_existing_users, use_round_robin) -%} -{%- if use_existing_users and caller is not defined -%} {} -{%- else %} - {%- if not use_existing_users %} - users: - tenants: {{ tenants }} - users_per_tenant: {{ users_per_tenant }} - user_choice_method: {{ "round_robin" if use_round_robin else "random" }} - {%- endif %} - {%- if caller is defined %} - {{ caller() }} - {%- endif %} -{%- endif %} -{%- endmacro %} - -{%- macro vm_params(image=none, flavor=none, size=none) %} -{%- if flavor is not none %} - flavor: - name: {{ flavor }} -{%- endif %} -{%- if image is not none %} - image: - name: {{ image }} -{%- endif %} -{%- if size is not none %} - size: {{ size }} -{%- endif %} -{%- endmacro %} - -{%- macro unlimited_volumes() %} - cinder: - gigabytes: -1 - snapshots: -1 - volumes: -1 -{%- endmacro %} - -{%- macro constant_runner(concurrency=1, times=1, is_smoke=True) %} - type: "constant" - {%- if is_smoke %} - concurrency: 1 - times: 1 - {%- else %} - concurrency: {{ concurrency }} - times: {{ times }} - {%- endif %} -{%- endmacro %} - -{%- macro rps_runner(rps=1, times=1, is_smoke=True) %} - type: rps - {%- if is_smoke %} - rps: 1 - times: 1 - {%- else %} - rps: {{ rps }} - times: {{ times }} - {%- endif %} -{%- endmacro %} - -{%- macro no_failures_sla() %} - failure_rate: - max: 0 -{%- endmacro %} - -{%- macro volumes(size=1, volumes_per_tenant=1) %} - volumes: - size: {{ size }} - volumes_per_tenant: {{ volumes_per_tenant }} -{%- endmacro %} - -{%- macro unlimited_nova(keypairs=false) %} - nova: - cores: -1 - floating_ips: -1 - instances: -1 - {%- if keypairs %} - key_pairs: -1 - {%- endif %} - ram: -1 - security_group_rules: -1 - security_groups: -1 -{%- endmacro %} - -{%- macro unlimited_neutron() %} -{% if "neutron" in service_list %} - neutron: - network: -1 - port: -1 - subnet: -1 -{% endif %} -{%- endmacro %} - -{%- macro glance_args(location, container="bare", type="qcow2") %} - container_format: {{ container }} - disk_format: {{ type }} - image_location: {{ location }} -{%- endmacro %} diff --git a/tasks/openstack/scenario/authentication.yaml b/tasks/openstack/scenario/authentication.yaml deleted file mode 100644 index d31f71e6af..0000000000 --- a/tasks/openstack/scenario/authentication.yaml +++ /dev/null @@ 
-1,8 +0,0 @@ - Authenticate.keystone: - - - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ rps_runner(rps=15*controllers_amount, times=20000*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} diff --git a/tasks/openstack/scenario/cinder.yaml b/tasks/openstack/scenario/cinder.yaml deleted file mode 100644 index 39605583a6..0000000000 --- a/tasks/openstack/scenario/cinder.yaml +++ /dev/null @@ -1,189 +0,0 @@ - CinderVolumes.create_and_attach_volume: - - - args: - {{ vm_params(image_name,flavor_name,1) }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount),times=min(30, 10*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - CinderVolumes.create_and_delete_snapshot: - - - args: - force: false - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {{ volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount),times=min(200, 67*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - CinderVolumes.create_and_delete_volume: - - - args: - size: - max: 1 - min: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount),times=min(100, 33*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - - args: - {{ vm_params(image_name,none,1) }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount),times=min(100, 33*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - - args: - size: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount),times=min(100, 33*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - CinderVolumes.create_and_extend_volume: - - - args: - new_size: 2 - size: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount),times=min(100, 33*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - CinderVolumes.create_and_list_snapshots: - - - args: - detailed: true - force: false - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {{ volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount),times=min(100, 33*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - CinderVolumes.create_and_list_volume: - - - args: - detailed: true - {{ vm_params(image_name,none,1) }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount),times=min(100, 33*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} 
- - - args: - detailed: true - size: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount),times=min(100, 33*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - CinderVolumes.create_and_upload_volume_to_image: - - - args: - container_format: "bare" - disk_format: "raw" - do_delete: true - force: false - size: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount),times=min(40, 13*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - CinderVolumes.create_from_volume_and_delete_volume: - - - args: - size: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {{ volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount),times=min(200, 67*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - CinderVolumes.create_nested_snapshots_and_attach_volume: - - - args: - {{ vm_params(image_name,flavor_name,none)}} - nested_level: 1 - size: - max: 1 - min: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount),times=min(10, 3*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - Quotas.cinder_update_and_delete: - - - args: - max_quota: 1024 - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount),times=min(200, 67*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} diff --git a/tasks/openstack/scenario/glance.yaml b/tasks/openstack/scenario/glance.yaml deleted file mode 100644 index 39f46ac897..0000000000 --- a/tasks/openstack/scenario/glance.yaml +++ /dev/null @@ -1,30 +0,0 @@ - GlanceImages.create_and_delete_image: - - - args: - {{ glance_args(location=glance_image_location) }} - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount),times=min(200, 67*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - GlanceImages.create_and_list_image: - - - args: - {{ glance_args(location=glance_image_location) }} - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount),times=min(200, 67*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - GlanceImages.list_images: - - - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount),times=min(200, 67*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} diff --git a/tasks/openstack/scenario/keystone.yaml b/tasks/openstack/scenario/keystone.yaml deleted file mode 100644 index 3db23b386a..0000000000 --- a/tasks/openstack/scenario/keystone.yaml +++ /dev/null @@ -1,62 +0,0 @@ - KeystoneBasic.add_and_remove_user_role: - - - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ 
constant_runner(concurrency=min(50, 7*controllers_amount),times=min(200, 67*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - KeystoneBasic.create_add_and_list_user_roles: - - - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=min(50, 7*controllers_amount),times=min(200, 67*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - KeystoneBasic.create_and_list_tenants: - - - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount),times=min(200, 10*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - KeystoneBasic.create_and_delete_role: - - - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=min(50, 7*controllers_amount),times=min(200, 67*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - KeystoneBasic.create_and_delete_service: - - - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=min(50, 7*controllers_amount),times=min(200, 67*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - KeystoneBasic.get_entities: - - - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=min(50, 3*controllers_amount),times=min(200, 67*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - KeystoneBasic.create_update_and_delete_tenant: - - - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=min(50, 7*controllers_amount),times=min(200, 67*controllers_amount), is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} diff --git a/tasks/openstack/scenario/neutron.yaml b/tasks/openstack/scenario/neutron.yaml deleted file mode 100644 index f5143c3b2c..0000000000 --- a/tasks/openstack/scenario/neutron.yaml +++ /dev/null @@ -1,262 +0,0 @@ - NeutronNetworks.create_and_delete_networks: - - - args: - network_create_args: {} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - neutron: - network: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=2*controllers_amount, times=8*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_delete_ports: - - - args: - network_create_args: {} - port_create_args: {} - ports_per_network: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - neutron: - network: -1 - port: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=2*controllers_amount, times=8*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_delete_routers: - - - args: - network_create_args: {} - router_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - subnet_create_args: {} - subnets_per_network: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - neutron: - network: -1 - subnet: -1 - port: -1 - router: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=2*controllers_amount, times=8*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_delete_subnets: - - - args: - network_create_args: {} - subnet_cidr_start: 
"1.1.0.0/30" - subnet_create_args: {} - subnets_per_network: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - neutron: - network: -1 - subnet: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=2*controllers_amount, times=8*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_list_networks: - - - args: - network_create_args: {} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - neutron: - network: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=2*controllers_amount, times=8*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_list_ports: - - - args: - network_create_args: {} - port_create_args: {} - ports_per_network: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - neutron: - network: -1 - port: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=2*controllers_amount, times=8*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_list_routers: - - - args: - network_create_args: {} - router_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - subnet_create_args: {} - subnets_per_network: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - neutron: - network: -1 - subnet: -1 - router: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=2*controllers_amount, times=8*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_list_subnets: - - - args: - network_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - subnet_create_args: {} - subnets_per_network: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - neutron: - network: -1 - subnet: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=2*controllers_amount, times=8*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_update_networks: - - - args: - network_create_args: {} - network_update_args: - admin_state_up: false - name: "_updated" - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - neutron: - network: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=2*controllers_amount, times=8*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_update_ports: - - - args: - network_create_args: {} - port_create_args: {} - port_update_args: - admin_state_up: false - device_id: "dummy_id" - device_owner: "dummy_owner" - name: "_port_updated" - ports_per_network: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - neutron: - network: -1 - port: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=2*controllers_amount, times=8*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_update_routers: - - - args: - network_create_args: {} - router_create_args: {} - router_update_args: - admin_state_up: false - name: "_router_updated" - subnet_cidr_start: "1.1.0.0/30" - subnet_create_args: {} - subnets_per_network: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - neutron: - network: -1 - subnet: -1 - port: -1 - router: -1 - {% endcall %} - runner: - {{ 
constant_runner(concurrency=2*controllers_amount, times=8*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_update_subnets: - - - args: - network_create_args: {} - subnet_cidr_start: "1.4.0.0/16" - subnet_create_args: {} - subnet_update_args: - enable_dhcp: false - name: "_subnet_updated" - subnets_per_network: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - neutron: - network: -1 - subnet: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=2*controllers_amount, times=8*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - Quotas.neutron_update: - - - args: - max_quota: 1024 - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=2*controllers_amount, times=8*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronSubnets.delete_subnets: - - - runner: - type: "constant" - times: 15 - concurrency: 15 - context: - {{ user_context(tenants_amount, users_amount, use_existing_users, use_round_robin) }} - quotas: - neutron: - network: -1 - subnet: -1 - network: - subnets_per_network: 15 - dualstack: True - router: {} diff --git a/tasks/openstack/scenario/nova.yaml b/tasks/openstack/scenario/nova.yaml deleted file mode 100644 index 8f8a8aaa4a..0000000000 --- a/tasks/openstack/scenario/nova.yaml +++ /dev/null @@ -1,195 +0,0 @@ - NovaKeypair.boot_and_delete_server_with_keypair: - - - args: - {{ vm_params(image_name, flavor_name) }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: - networks_per_tenant: 1 - start_cidr: "100.1.0.0/25" - quotas: - {{ unlimited_neutron() }} - {{ unlimited_nova(keypairs=true) }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount), times=17*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaKeypair.create_and_delete_keypair: - - - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_nova(keypairs=true) }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount), times=67*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaKeypair.create_and_list_keypairs: - - - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_nova(keypairs=true) }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount), times=67*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.boot_and_bounce_server: - - - args: - actions: - - - hard_reboot: 1 - - - soft_reboot: 1 - - - stop_start: 1 - - - rescue_unrescue: 1 - {{ vm_params(image_name, flavor_name) }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: - networks_per_tenant: 1 - start_cidr: "100.1.0.0/25" - quotas: - {{ unlimited_neutron() }} - {{ unlimited_nova() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount), times=17*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.boot_and_delete_server: - - - args: - {{ vm_params(image_name, flavor_name) }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: - networks_per_tenant: 1 - start_cidr: "100.1.0.0/25" - quotas: - {{ unlimited_neutron() }} - {{ 
unlimited_nova() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount), times=17*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.boot_and_list_server: - - - args: - detailed: true - {{ vm_params(image_name, flavor_name) }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: - networks_per_tenant: 1 - start_cidr: "100.1.0.0/25" - quotas: - {{ unlimited_neutron() }} - {{ unlimited_nova() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 10*controllers_amount), times=333*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.boot_and_rebuild_server: - - - args: - {{ vm_params(flavor=flavor_name) }} - from_image: - name: {{ image_name }} - to_image: - name: {{ image_name }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: - networks_per_tenant: 1 - start_cidr: "100.1.0.0/25" - quotas: - {{ unlimited_neutron() }} - {{ unlimited_nova() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount), times=17*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.boot_server_from_volume_and_delete: - - - args: - {{ vm_params(image_name, flavor_name) }} - volume_size: 5 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: - networks_per_tenant: 1 - start_cidr: "100.1.0.0/25" - quotas: - {{ unlimited_volumes() }} - {{ unlimited_neutron() }} - {{ unlimited_nova() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 3*controllers_amount), times=17*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.pause_and_unpause_server: - - - args: - {{ vm_params(image_name, flavor_name) }} - force_delete: false - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: - networks_per_tenant: 1 - start_cidr: "100.1.0.0/25" - quotas: - {{ unlimited_neutron() }} - {{ unlimited_nova() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount), times=17*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.snapshot_server: - - - args: - {{ vm_params(image_name, flavor_name) }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: - networks_per_tenant: 1 - start_cidr: "100.1.0.0/25" - quotas: - {{ unlimited_neutron() }} - {{ unlimited_nova() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount), times=17*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - Quotas.nova_update_and_delete: - - - args: - max_quota: 1024 - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=min(50, 2*controllers_amount), times=17*controllers_amount, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} diff --git a/tasks/openstack/task.yaml b/tasks/openstack/task.yaml deleted file mode 100644 index 5739e31ce8..0000000000 --- a/tasks/openstack/task.yaml +++ /dev/null @@ -1,42 +0,0 @@ -{%- set glance_image_location = glance_image_location|default("https://download.cirros-cloud.net/0.3.5/cirros-0.3.5-i386-disk.img") %} -{%- set image_name = image_name|default("^(cirros.*-disk|TestVM)$") %} -{%- set flavor_name = flavor_name|default("m1.tiny") %} -{%- set use_existing_users = 
use_existing_users|default(false) %} -{%- set service_list = service_list|default(["authentication", "cinder", "keystone", "nova", "glance", "neutron"]) %} -{%- set smoke = smoke|default(true) %} -{%- set controllers_amount = controllers_amount|default(1) %} -{%- if smoke %} -{%- set users_amount = 1 %} -{%- set tenants_amount = 1 %} -{%- else %} -{%- set users_amount = users_amount|default(1) %} -{%- set tenants_amount = tenants_amount|default(1) %} -{%- endif %} - -{%- from "macro/macro.yaml" import user_context, vm_params, unlimited_volumes, constant_runner, rps_runner, no_failures_sla -%} -{%- from "macro/macro.yaml" import volumes, unlimited_nova, unlimited_neutron, glance_args -%} - ---- -{% if "authentication" in service_list %} -{%- include "scenario/authentication.yaml"-%} -{% endif %} - -{% if "cinder" in service_list %} -{%- include "scenario/cinder.yaml"-%} -{% endif %} - -{% if "keystone" in service_list %} -{%- include "scenario/keystone.yaml"-%} -{% endif %} - -{% if "nova" in service_list %} -{%- include "scenario/nova.yaml"-%} -{% endif %} - -{% if "glance" in service_list %} -{%- include "scenario/glance.yaml"-%} -{% endif %} - -{% if "neutron" in service_list %} -{%- include "scenario/neutron.yaml"-%} -{% endif %} diff --git a/tasks/openstack/task_arguments.yaml b/tasks/openstack/task_arguments.yaml deleted file mode 100644 index cbc4eca129..0000000000 --- a/tasks/openstack/task_arguments.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - service_list: - - authentication - - nova - - neutron - - keystone - - cinder - - glance - use_existing_users: false - image_name: "^(cirros.*-disk|TestVM)$" - flavor_name: "m1.tiny" - glance_image_location: "" - smoke: true - users_amount: 1 - tenants_amount: 1 - controllers_amount: 3 - compute_amount: 77 - storage_amount: 20 - network_amount: 1 - diff --git a/tasks/openstack_metrics/task.yaml b/tasks/openstack_metrics/task.yaml deleted file mode 100644 index c4e3a28d8a..0000000000 --- a/tasks/openstack_metrics/task.yaml +++ /dev/null @@ -1,163 +0,0 @@ -{%- set floating_network = floating_network|default("public") %} -{%- set image_name = image_name|default("^(cirros.*-disk|TestVM)$") %} -{%- set flavor_name = flavor_name|default("m1.tiny") %} - -{%- set availability_zone = availability_zone|default("default") %} - -{%- set enable_auth = enable_auth|default(true) %} -{%- set enable_nova = enable_nova|default(true) %} -{%- set enable_glance = enable_glance|default(true) %} -{%- set enable_cinder = enable_cinder|default(true) %} -{%- set enable_neutron = enable_neutron|default(true) %} - -{%- set load = load|default(1) %} - -{%- set enable_admin = enable_admin|default(false) %} - ---- - version: 2 - title: "Collect Key OpenStack Metrics" - description: | - Use this task to collect performance metrics for key OpenStack projects - You can use arguments to specify image, flavor, load and what services - should be tested - subtasks: - {% if enable_auth %} - - - title: "Test performance of authentication" - scenario: - Authenticate.keystone: {} - runner: - constant: - times: {{ load }} - concurrency: {{ load }} - {% endif %} - {% if enable_nova %} - - - title: "Test performance of key VM operations" - scenario: - NovaServers.boot_and_bounce_server: - flavor: - name: "{{flavor_name}}" - image: - name: "{{image_name}}" - availability_zone: "{{availability_zone}}" - force_delete: false - actions: - - - hard_reboot: 1 - - - soft_reboot: 1 - - - stop_start: 1 - - - rescue_unrescue: 1 - runner: - constant: - times: {{ load }} - concurrency: {{ load }} - - 
- title: "Key pair performance" - workloads: - - - scenario: - NovaKeypair.create_and_delete_keypair: {} - runner: - constant: - times: {{ load }} - concurrency: {{ load }} - - - scenario: - NovaKeypair.create_and_list_keypairs: {} - runner: - constant: - times: {{ load }} - concurrency: {{ load }} - - - title: "List flavors and servers" - workloads: - - - scenario: - NovaServers.list_servers: - detailed: True - runner: - constant: - times: {{ load }} - concurrency: {{ load }} - - - scenario: - NovaFlavors.list_flavors: - detailed: True - runner: - constant: - times: {{ load }} - concurrency: {{ load }} - {% endif %} - - {% if enable_glance %} - - - title: "Glance Image List" - scenario: - GlanceImages.list_images: - detailed: True - runner: - constant: - times: {{ load }} - concurrency: {{ load }} - {% endif %} - {% if enable_neutron %} - - - title: "Test Floating Ips" - scenario: - NeutronNetworks.create_and_delete_floating_ips: - floating_network: {{ floating_network }} - floating_ip_args: {} - runner: - constant: - times: {{ load }} - concurrency: {{ load }} - - - title: "Test Security Groups" - scenario: - NeutronSecurityGroup.create_and_delete_security_group_rule: - security_group_args: {} - security_group_rule_args: {} - runner: - constant: - times: {{ load }} - concurrency: {{ load }} - {% if enable_admin %} - - - title: "Test performance networks, subnetworks, routers and interfaces" - scenario: - NeutronNetworks.create_and_delete_routers: - network_create_args: {} - subnet_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - subnets_per_network: 1 - router_create_args: {} - runner: - constant: - times: {{ load }} - concurrency: {{ load }} - contexts: - network: {} - {% endif %} - {% endif %} - {% if enable_cinder %} - - - title: "Cinder volumes create, delete, attach and detach" - scenario: - CinderVolumes.create_and_attach_volume: - size: 1 - image: - name: {{ image_name }} - flavor: - name: {{ flavor_name }} - create_vm_params: - availability_zone: "{{ availability_zone }}" - runner: - constant: - times: {{ load }} - concurrency: {{ load }} - {% endif %} diff --git a/tests/ci/osresources.py b/tests/ci/osresources.py deleted file mode 100755 index ae9a96ba11..0000000000 --- a/tests/ci/osresources.py +++ /dev/null @@ -1,584 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -"""List and compare most used OpenStack cloud resources.""" - - -import argparse -import json -import os -import subprocess -import sys - -import six - -from rally.cli import cliutils -from rally.common.plugin import discover -from rally import consts -from rally.plugins.openstack import credential - - -def skip_if_service(service): - def wrapper(func): - def inner(self): - if service in self.clients.services().values(): - return [] - return func(self) - return inner - return wrapper - - -class ResourceManager(object): - - REQUIRED_SERVICE = None - STR_ATTRS = ("id", "name") - - def __init__(self, clients): - self.clients = clients - - def is_available(self): - if self.REQUIRED_SERVICE: - return self.REQUIRED_SERVICE in self.clients.services().values() - return True - - @property - def client(self): - return getattr(self.clients, self.__class__.__name__.lower())() - - def get_resources(self): - all_resources = [] - cls = self.__class__.__name__.lower() - for prop in dir(self): - if not prop.startswith("list_"): - continue - f = getattr(self, prop) - resources = f() or [] - resource_name = prop[5:][:-1] - for raw_res in resources: - res = {"cls": cls, "resource_name": resource_name, - "id": {}, "props": {}} - if not isinstance(raw_res, dict): - raw_res = {k: getattr(raw_res, k) for k in dir(raw_res) - if not k.startswith("_") - if not callable(getattr(raw_res, k))} - for key, value in raw_res.items(): - if key.startswith("_"): - continue - if key in self.STR_ATTRS: - res["id"][key] = value - else: - try: - res["props"][key] = json.dumps(value, indent=2) - except TypeError: - res["props"][key] = str(value) - if not res["id"] and not res["props"]: - print("1: %s" % raw_res) - print("2: %s" % cls) - print("3: %s" % resource_name) - raise ValueError("Failed to represent resource %r" % - raw_res) - all_resources.append(res) - return all_resources - - -class Keystone(ResourceManager): - - REQUIRED_SERVICE = consts.Service.KEYSTONE - - def list_users(self): - return self.client.users.list() - - def list_tenants(self): - if hasattr(self.client, "projects"): - return self.client.projects.list() # V3 - return self.client.tenants.list() # V2 - - def list_roles(self): - return self.client.roles.list() - - def list_ec2credentials(self): - users = self.list_users() - ec2_list = [] - for user in users: - ec2_list.extend( - self.client.ec2.list(user.id)) - return ec2_list - - -class Magnum(ResourceManager): - - REQUIRED_SERVICE = consts.Service.MAGNUM - - def list_cluster_templates(self): - result = [] - marker = None - while True: - ct_list = self.client.cluster_templates.list(marker=marker) - if not ct_list: - break - result.extend(ct_list) - marker = ct_list[-1].uuid - return result - - def list_clusters(self): - result = [] - marker = None - while True: - clusters = self.client.clusters.list(marker=marker) - if not clusters: - break - result.extend(clusters) - marker = clusters[-1].uuid - return result - - -class Mistral(ResourceManager): - - REQUIRED_SERVICE = consts.Service.MISTRAL - - def list_workbooks(self): - return self.client.workbooks.list() - - def list_workflows(self): - return self.client.workflows.list() - - def list_executions(self): - return self.client.executions.list() - - -class Nova(ResourceManager): - - REQUIRED_SERVICE = consts.Service.NOVA - - def list_flavors(self): - return self.client.flavors.list() - - def list_aggregates(self): - return self.client.aggregates.list() - - def list_hypervisors(self): - return self.client.hypervisors.list() - - def list_agents(self): - 
return self.client.agents.list() - - def list_keypairs(self): - return self.client.keypairs.list() - - def list_servers(self): - return self.client.servers.list( - search_opts={"all_tenants": True}) - - def list_server_groups(self): - return self.client.server_groups.list(all_projects=True) - - def list_services(self): - return self.client.services.list() - - def list_availability_zones(self): - return self.client.availability_zones.list() - - -class Neutron(ResourceManager): - - REQUIRED_SERVICE = consts.Service.NEUTRON - - def has_extension(self, name): - extensions = self.client.list_extensions().get("extensions", []) - return any(ext.get("alias") == name for ext in extensions) - - def list_networks(self): - return self.client.list_networks()["networks"] - - def list_subnets(self): - return self.client.list_subnets()["subnets"] - - def list_routers(self): - return self.client.list_routers()["routers"] - - def list_ports(self): - return self.client.list_ports()["ports"] - - def list_floatingips(self): - return self.client.list_floatingips()["floatingips"] - - def list_security_groups(self): - return self.client.list_security_groups()["security_groups"] - - def list_health_monitors(self): - if self.has_extension("lbaas"): - return self.client.list_health_monitors()["health_monitors"] - - def list_pools(self): - if self.has_extension("lbaas"): - return self.client.list_pools()["pools"] - - def list_vips(self): - if self.has_extension("lbaas"): - return self.client.list_vips()["vips"] - - def list_bgpvpns(self): - if self.has_extension("bgpvpn"): - return self.client.list_bgpvpns()["bgpvpns"] - - -class Glance(ResourceManager): - - REQUIRED_SERVICE = consts.Service.GLANCE - - def list_images(self): - return self.client.images.list() - - -class Heat(ResourceManager): - - REQUIRED_SERVICE = consts.Service.HEAT - - def list_resource_types(self): - return self.client.resource_types.list() - - def list_stacks(self): - return self.client.stacks.list() - - -class Cinder(ResourceManager): - - REQUIRED_SERVICE = consts.Service.CINDER - - def list_availability_zones(self): - return self.client.availability_zones.list() - - def list_backups(self): - return self.client.backups.list() - - def list_volume_snapshots(self): - return self.client.volume_snapshots.list() - - def list_volume_types(self): - return self.client.volume_types.list() - - def list_encryption_types(self): - return self.client.volume_encryption_types.list() - - def list_transfers(self): - return self.client.transfers.list() - - def list_volumes(self): - return self.client.volumes.list(search_opts={"all_tenants": True}) - - def list_qos(self): - return self.client.qos_specs.list() - - -class Senlin(ResourceManager): - - REQUIRED_SERVICE = consts.Service.SENLIN - - def list_clusters(self): - return self.client.clusters() - - def list_profiles(self): - return self.client.profiles() - - -class Manila(ResourceManager): - - REQUIRED_SERVICE = consts.Service.MANILA - - def list_shares(self): - return self.client.shares.list(detailed=False, - search_opts={"all_tenants": True}) - - def list_share_networks(self): - return self.client.share_networks.list( - detailed=False, search_opts={"all_tenants": True}) - - def list_share_servers(self): - return self.client.share_servers.list( - search_opts={"all_tenants": True}) - - -class Gnocchi(ResourceManager): - - REQUIRED_SERVICE = consts.Service.GNOCCHI - - def list_resources(self): - return self.client.resource.list() - - -class Ironic(ResourceManager): - - REQUIRED_SERVICE = consts.Service.IRONIC - 
- def list_nodes(self): - return self.client.node.list() - - -class Sahara(ResourceManager): - - REQUIRED_SERVICE = consts.Service.SAHARA - - def list_node_group_templates(self): - return self.client.node_group_templates.list() - - -class Murano(ResourceManager): - - REQUIRED_SERVICE = consts.Service.MURANO - - def list_environments(self): - return self.client.environments.list() - - def list_packages(self): - return self.client.packages.list(include_disabled=True) - - -class Designate(ResourceManager): - - REQUIRED_SERVICE = consts.Service.DESIGNATE - - def list_domains(self): - return self.client.domains.list() - - def list_records(self): - result = [] - result.extend(self.client.records.list(domain_id) - for domain_id in self.client.domains.list()) - return result - - def list_servers(self): - return self.client.servers.list() - - def list_zones(self): - return self.clients.designate("2").zones.list() - - def list_recordset(self): - client = self.clients.designate("2") - results = [] - results.extend(client.recordsets.list(zone_id) - for zone_id in client.zones.list()) - return results - - -class Trove(ResourceManager): - - REQUIRED_SERVICE = consts.Service.TROVE - - def list_backups(self): - return self.client.backup.list() - - def list_clusters(self): - return self.client.cluster.list() - - def list_configurations(self): - return self.client.configuration.list() - - def list_databases(self): - return self.client.database.list() - - def list_datastore(self): - return self.client.datastore.list() - - def list_instances(self): - return self.client.list(include_clustered=True) - - def list_modules(self): - return self.client.module.list(datastore="all") - - -class Monasca(ResourceManager): - - REQUIRED_SERVICE = consts.Service.MONASCA - - def list_metrics(self): - return self.client.metrics.list() - - -class Watcher(ResourceManager): - - REQUIRED_SERVICE = consts.Service.WATCHER - - REPR_KEYS = ("uuid", "name") - - def list_audits(self): - return self.client.audit.list() - - def list_audit_templates(self): - return self.client.audit_template.list() - - def list_goals(self): - return self.client.goal.list() - - def list_strategies(self): - return self.client.strategy.list() - - def list_action_plans(self): - return self.client.action_plan.list() - - -class CloudResources(object): - """List and compare cloud resources. - - resources = CloudResources(auth_url=..., ...) - saved_list = resources.list() - - # Do something with the cloud ... 
- - changes = resources.compare(saved_list) - has_changed = any(changes) - removed, added = changes - """ - - def __init__(self, **kwargs): - self.clients = credential.OpenStackCredential(**kwargs).clients() - - def list(self): - managers_classes = discover.itersubclasses(ResourceManager) - resources = [] - for cls in managers_classes: - manager = cls(self.clients) - if manager.is_available(): - resources.extend(manager.get_resources()) - return resources - - def compare(self, with_list): - def make_uuid(res): - return"%s.%s:%s" % ( - res["cls"], res["resource_name"], - ";".join(["%s=%s" % (k, v) - for k, v in sorted(res["id"].items())])) - - current_resources = dict((make_uuid(r), r) for r in self.list()) - saved_resources = dict((make_uuid(r), r) for r in with_list) - - removed = set(saved_resources.keys()) - set(current_resources.keys()) - removed = [saved_resources[k] for k in sorted(removed)] - added = set(current_resources.keys()) - set(saved_resources.keys()) - added = [current_resources[k] for k in sorted(added)] - - return removed, added - - -def _print_tabular_resources(resources, table_label): - def dict_formatter(d): - return "\n".join("%s:%s" % (k, v) for k, v in d.items()) - - cliutils.print_list( - objs=[dict(r) for r in resources], - fields=("cls", "resource_name", "id", "fields"), - field_labels=("service", "resource type", "id", "fields"), - table_label=table_label, - formatters={"id": lambda d: dict_formatter(d["id"]), - "fields": lambda d: dict_formatter(d["props"])} - ) - print("") - - -def main(): - - parser = argparse.ArgumentParser( - description=("Save list of OpenStack cloud resources or compare " - "with previously saved list.")) - parser.add_argument("--credentials", - type=argparse.FileType("r"), - metavar="", - help="cloud credentials in JSON format") - group = parser.add_mutually_exclusive_group(required=True) - group.add_argument("--dump-list", - type=argparse.FileType("w"), - metavar="", - help="dump resources to given file in JSON format") - group.add_argument("--compare-with-list", - type=argparse.FileType("r"), - metavar="", - help=("compare current resources with a list from " - "given JSON file")) - args = parser.parse_args() - - if args.credentials: - config = json.load(args.credentials) - else: - out = subprocess.check_output(["rally", "deployment", "config", - "--deployment", "devstack"]) - config = json.loads(out if six.PY2 else out.decode("utf-8")) - config = config["openstack"] - config.update(config.pop("admin")) - if "users" in config: - del config["users"] - - resources = CloudResources(**config) - - if args.dump_list: - resources_list = resources.list() - json.dump(resources_list, args.dump_list) - elif args.compare_with_list: - given_list = json.load(args.compare_with_list) - changes = resources.compare(with_list=given_list) - removed, added = changes - - # Cinder has a feature - cache images for speeding-up time of creating - # volumes from images. 
let's put such cache-volumes into expected list - volume_names = [ - "image-%s" % i["id"]["id"] for i in given_list - if i["cls"] == "glance" and i["resource_name"] == "image"] - - # filter out expected additions - expected = [] - for resource in added: - if ( - (resource["cls"] == "keystone" and - resource["resource_name"] == "role" and - resource["id"].get("name") == "_member_") or - - (resource["cls"] == "neutron" and - resource["resource_name"] == "security_group" and - resource["id"].get("name") == "default") or - - (resource["cls"] == "cinder" and - resource["resource_name"] == "volume" and - resource["id"].get("name") in volume_names) or - - resource["cls"] == "murano" or - - # Glance has issues with uWSGI integration... - resource["cls"] == "glance"): - expected.append(resource) - - for resource in expected: - added.remove(resource) - - if removed: - _print_tabular_resources(removed, "Removed resources") - - if added: - _print_tabular_resources(added, "Added resources (unexpected)") - - if expected: - _print_tabular_resources(expected, "Added resources (expected)") - - if any(changes): - # NOTE(andreykurilin): '1' return value will fail gate job. It is - # ok for changes to Rally project, but changes to other - # projects, which have rally job, should not be affected by - # this check, since in most cases resources are left due - # to wrong cleanup of a particular scenario. - if os.environ.get("ZUUL_PROJECT") == "openstack/rally": - return 1 - return 0 - return 0 - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/tests/ci/rally_gate_functions.sh b/tests/ci/rally_gate_functions.sh index dbc240b86d..9caedb48e3 100644 --- a/tests/ci/rally_gate_functions.sh +++ b/tests/ci/rally_gate_functions.sh @@ -7,6 +7,8 @@ RALLY_EXTRA_DIR=~/.rally/extra function setUp () { set -x + sudo pip install rally_openstack + JOB_DIR=$1 mkdir -p $RALLY_PLUGINS_DIR @@ -117,8 +119,6 @@ function run () { PYTHON=python fi - $PYTHON $RALLY_DIR/tests/ci/osresources.py --dump-list resources_at_start.txt - set +e rally --rally-debug task start --task $TASK $TASK_ARGS retval=$? @@ -155,15 +155,5 @@ function run () { retval=$? set -e - cp resources_at_start.txt rally-plot/ - if [ "$ZUUL_PROJECT" == "openstack/rally" ];then - $PYTHON $RALLY_DIR/tests/ci/osresources.py\ - --compare-with-list resources_at_start.txt - else - $PYTHON $RALLY_DIR/tests/ci/osresources.py\ - --compare-with-list resources_at_start.txt\ - | gzip > rally-plot/resources_diff.txt.gz - fi - exit $retval } diff --git a/tests/ci/rally_verify.py b/tests/ci/rally_verify.py index 627906a326..6a3b06f899 100755 --- a/tests/ci/rally_verify.py +++ b/tests/ci/rally_verify.py @@ -23,6 +23,8 @@ import subprocess import sys import uuid +from rally_openstack import osclients + from rally import api from rally.ui import utils @@ -164,8 +166,14 @@ class SetUpStep(Step): self.result["status"] = Status.ERROR return - credentials = deployment.get_credentials_for("openstack")["admin"] - clients = credentials.clients() + credentials = None + for platform, creds in deployment.to_dict()["credentials"].items(): + if platform == "openstack": + credentials = creds[0]["admin"] + if credentials is None: + return Status.ERROR, "There is no openstack credentials." 
+ + clients = osclients.Clients(credentials) if self.args.ctx_create_resources: # If the 'ctx-create-resources' arg is provided, delete images and diff --git a/tests/functional/test_cli_deployment.py b/tests/functional/test_cli_deployment.py deleted file mode 100644 index 99d8efb61a..0000000000 --- a/tests/functional/test_cli_deployment.py +++ /dev/null @@ -1,145 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import re -import unittest - -from tests.functional import utils - - -TEST_ENV = { - "OS_USERNAME": "admin", - "OS_PASSWORD": "admin", - "OS_TENANT_NAME": "admin", - "OS_AUTH_URL": "http://fake/", -} - - -class DeploymentTestCase(unittest.TestCase): - - def test_create_fromenv_list_show(self): - # NOTE(andreykurilin): `rally deployment create --fromenv` is - # hardcoded to OpenStack. Should be fixed as soon as the platforms - # will be introduced. - rally = utils.Rally() - rally.env.update(TEST_ENV) - - rally("deployment create --name t_create_env --fromenv") - self.assertIn("t_create_env", rally("deployment list")) - self.assertIn(TEST_ENV["OS_AUTH_URL"], - rally("deployment show")) - - def test_create_fromfile(self): - rally = utils.Rally() - rally.env.update(TEST_ENV) - rally("deployment create --name t_create_env --fromenv") - with open("/tmp/.tmp.deployment", "w") as f: - f.write(rally("deployment config")) - rally("deployment create --name t_create_file " - "--filename /tmp/.tmp.deployment") - self.assertIn("t_create_file", rally("deployment list")) - - def test_create_empty(self): - rally = utils.Rally() - rally("deployment create --name t_empty") - self.assertEqual("{}", rally("deployment config").strip()) - - def test_destroy(self): - rally = utils.Rally() - rally.env.update(TEST_ENV) - rally("deployment create --name t_create_env --fromenv") - self.assertIn("t_create_env", rally("deployment list")) - rally("deployment destroy") - self.assertNotIn("t_create_env", rally("deployment list")) - - def test_check_success(self): - rally = utils.Rally() - rally("deployment check") - - def test_check_fail(self): - rally = utils.Rally() - rally.env.update(TEST_ENV) - rally("deployment create --name t_create_env --fromenv") - self.assertRaises(utils.RallyCliError, rally, "deployment check") - - def test_check_debug(self): - rally = utils.Rally() - rally.env.update(TEST_ENV) - rally("deployment create --name t_create_env --fromenv") - config = rally("deployment config", getjson=True) - config["openstack"]["admin"]["password"] = "fakepassword" - file = utils.JsonTempFile(config) - rally("deployment create --name t_create_file_debug " - "--filename %s" % file.filename) - self.assertIn("t_create_file_debug", rally("deployment list")) - self.assertEqual(config, rally("deployment config", getjson=True)) - self.assertRaises(utils.RallyCliError, rally, "deployment check") - - try: - rally("--debug deployment check") - except utils.RallyCliError as e: - self.assertIn( - "[-] Unable to authenticate for user %(username)s in" - " 
project %(tenant_name)s" % - {"username": TEST_ENV["OS_USERNAME"], - "tenant_name": TEST_ENV["OS_TENANT_NAME"]}, - str(e)) - self.assertIn( - "AuthenticationFailed: Failed to authenticate to %(auth_url)s" - " for user '%(username)s' in project '%(tenant_name)s'" % - {"auth_url": TEST_ENV["OS_AUTH_URL"], - "username": TEST_ENV["OS_USERNAME"], - "tenant_name": TEST_ENV["OS_TENANT_NAME"]}, - str(e)) - else: - self.fail("rally deployment fails to raise error for wrong" - " authentication info") - - def test_use(self): - rally = utils.Rally() - rally.env.update(TEST_ENV) - output = rally( - "deployment create --name t_create_env1 --fromenv") - uuid = re.search(r"Using deployment: (?P[0-9a-f\-]{36})", - output).group("uuid") - rally("deployment create --name t_create_env2 --fromenv") - rally("deployment use --deployment %s" % uuid) - current_deployment = utils.get_global("RALLY_DEPLOYMENT", - rally.env) - self.assertEqual(uuid, current_deployment) - - # TODO(andreykurilin): Do not forget to move thes tests while splitting - # rally to main framework and openstack plugins - - def test_create_from_env_openstack_deployment(self): - rally = utils.Rally() - rally.env.update(TEST_ENV) - rally("deployment create --name t_create_env --fromenv") - config = rally("deployment config", getjson=True) - self.assertIn("openstack", config) - self.assertEqual(TEST_ENV["OS_USERNAME"], - config["openstack"]["admin"]["username"]) - self.assertEqual(TEST_ENV["OS_PASSWORD"], - config["openstack"]["admin"]["password"]) - if "project_name" in config["openstack"]["admin"]: - # keystone v3 - self.assertEqual(TEST_ENV["OS_TENANT_NAME"], - config["openstack"]["admin"]["project_name"]) - else: - # keystone v2 - self.assertEqual(TEST_ENV["OS_TENANT_NAME"], - config["openstack"]["admin"]["tenant_name"]) - self.assertEqual(TEST_ENV["OS_AUTH_URL"], - config["openstack"]["auth_url"]) diff --git a/tests/unit/cli/commands/test_task.py b/tests/unit/cli/commands/test_task.py index cf77ea6fe8..6447806cb6 100644 --- a/tests/unit/cli/commands/test_task.py +++ b/tests/unit/cli/commands/test_task.py @@ -161,7 +161,7 @@ class TaskCommandsTestCase(test.TestCase): def test_load_task_including_other_template(self): other_template_path = os.path.join( os.path.dirname(rally.__file__), os.pardir, - "samples/tasks/scenarios/nova/boot.json") + "samples/tasks/scenarios/dummy/dummy.json") input_task = "{%% include \"%s\" %%}" % os.path.basename( other_template_path) expect = self.task._load_and_validate_task(self.real_api, diff --git a/tests/unit/common/db/test_migrations.py b/tests/unit/common/db/test_migrations.py index 5cb1b7c3dc..ac80f5ccde 100644 --- a/tests/unit/common/db/test_migrations.py +++ b/tests/unit/common/db/test_migrations.py @@ -35,10 +35,25 @@ import rally from rally.common import db from rally.common.db import models from rally import consts +from rally.task import context from tests.unit.common.db import test_migrations_base from tests.unit import test as rtest +@context.configure(name="users", platform="testing", order=2) +class UsersContext(context.Context): + def setup(self): + pass + + def cleanup(self): + pass + + +@context.configure(name="volumes", platform="testing", order=3) +class UsersContext(UsersContext): + pass + + class MigrationTestCase(rtest.DBTestCase, test_migrations.ModelsMigrationsSync): """Test for checking of equality models state and migrations. 
@@ -2167,6 +2182,7 @@ class MigrationWalkTestCase(rtest.DBTestCase, deployment_table.c.uuid == deployment_uuid)) def _pre_upgrade_dc46687661df(self, engine): + deployment_table = db_utils.get_table(engine, "deployments") task_table = db_utils.get_table(engine, "tasks") subtask_table = db_utils.get_table(engine, "subtasks") @@ -2310,10 +2326,10 @@ class MigrationWalkTestCase(rtest.DBTestCase, "description": mock.ANY, "order_of_execution": { "note": mock.ANY, - "order": ["users@openstack.setup", - "volumes@openstack.setup", - "volumes@openstack.cleanup", - "users@openstack.cleanup"]}}, + "order": ["users@testing.setup", + "volumes@testing.setup", + "volumes@testing.cleanup", + "users@testing.cleanup"]}}, "setup": {"started_at": 1483221600.0, "finished_at": 1483221601.99, "atomic_actions": [], diff --git a/tests/unit/common/objects/test_credential.py b/tests/unit/common/objects/test_credential.py deleted file mode 100644 index c32b643458..0000000000 --- a/tests/unit/common/objects/test_credential.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from rally.common import objects -from rally import consts -from tests.unit import test - - -# TODO(astudenov): remove this class in future releases - -class CredentialTestCase(test.TestCase): - - def test_to_dict(self): - credential = objects.Credential( - "foo_url", "foo_user", "foo_password", - tenant_name="foo_tenant", - permission=consts.EndpointPermission.ADMIN) - self.assertEqual({"auth_url": "foo_url", - "username": "foo_user", - "password": "foo_password", - "tenant_name": "foo_tenant", - "region_name": None, - "domain_name": None, - "endpoint": None, - "endpoint_type": None, - "https_insecure": False, - "https_cacert": None, - "project_domain_name": None, - "user_domain_name": None, - "profiler_hmac_key": None, - "profiler_conn_str": None}, - credential.to_dict()) - - def test_to_dict_with_include_permission(self): - credential = objects.Credential( - "foo_url", "foo_user", "foo_password", - tenant_name="foo_tenant", - permission=consts.EndpointPermission.ADMIN) - self.assertEqual({"auth_url": "foo_url", - "username": "foo_user", - "password": "foo_password", - "tenant_name": "foo_tenant", - "region_name": None, - "domain_name": None, - "permission": consts.EndpointPermission.ADMIN, - "endpoint": None, - "endpoint_type": None, - "https_insecure": False, - "https_cacert": None, - "project_domain_name": None, - "user_domain_name": None, - "profiler_hmac_key": None, - "profiler_conn_str": None}, - credential.to_dict(include_permission=True)) - - def test_to_dict_with_kwarg_credential(self): - credential = objects.Credential( - "foo_url", "foo_user", "foo_password", - tenant_name="foo_tenant", - endpoint="foo_endpoint", - permission=consts.EndpointPermission.ADMIN, - endpoint_type=consts.EndpointType.PUBLIC) - self.assertEqual({"auth_url": "foo_url", - "username": "foo_user", - "password": "foo_password", - "tenant_name": "foo_tenant", - "region_name": None, 
- "domain_name": None, - "endpoint": "foo_endpoint", - "endpoint_type": consts.EndpointType.PUBLIC, - "https_insecure": False, - "https_cacert": None, - "project_domain_name": None, - "user_domain_name": None, - "profiler_hmac_key": None, - "profiler_conn_str": None}, - credential.to_dict()) diff --git a/tests/unit/common/objects/test_deploy.py b/tests/unit/common/objects/test_deploy.py index ccd3b8395f..0f30d92c35 100644 --- a/tests/unit/common/objects/test_deploy.py +++ b/tests/unit/common/objects/test_deploy.py @@ -95,79 +95,6 @@ class DeploymentTestCase(test.TestCase): self.assertEqual({"openstack", "foo"}, set(deploy.get_platforms())) - @mock.patch("rally.common.objects.Deployment.get_all_credentials") - def test_get_credentials_for(self, mock_get_all_credentials): - mock_get_all_credentials.return_value = { - "foo": ["bar"] - } - - deploy = objects.Deployment(deployment=self.env) - - self.assertEqual("bar", deploy.get_credentials_for("foo")) - - def test_get_credentials_for_default(self): - deploy = objects.Deployment(deployment=self.env) - creds = deploy.get_credentials_for("default") - self.assertEqual({"admin": None, "users": []}, creds) - - @mock.patch("rally.plugins.openstack.credential.OpenStackCredential") - def test_get_all_credentials(self, mock_open_stack_credential): - openstack_cred = mock_open_stack_credential - openstack_admin = {"openstack": "admin"} - openstack_user_1 = {"openstack": "user1"} - openstack_user_2 = {"openstack": "user2"} - - deploy = objects.Deployment(deployment=self.env) - deploy._all_credentials = { - "openstack": [{"admin": openstack_admin, - "users": [openstack_user_1, openstack_user_2]}], - "foo": ["something"] - } - - self.assertEqual( - { - "openstack": [ - {"admin": openstack_cred.return_value, - "users": [openstack_cred.return_value, - openstack_cred.return_value]}], - "foo": ["something"]}, - deploy.get_all_credentials()) - - self.assertEqual([mock.call(permission=consts.EndpointPermission.ADMIN, - **openstack_admin), - mock.call(**openstack_user_1), - mock.call(**openstack_user_2)], - openstack_cred.call_args_list) - - deploy._all_credentials = { - "openstack": [{"admin": None, - "users": [openstack_user_1, openstack_user_2]}]} - - self.assertEqual( - { - "openstack": [ - {"admin": None, - "users": [openstack_cred.return_value, - openstack_cred.return_value]}]}, - deploy.get_all_credentials()) - - @mock.patch("rally.plugins.openstack.credential.OpenStackCredential") - def test_get_deprecated(self, mock_open_stack_credential): - credential_inst = mock_open_stack_credential.return_value - - deploy = objects.Deployment(deployment=self.env) - deploy._all_credentials = { - "openstack": [{"admin": {"fake_admin": True}, - "users": [{"fake_user": True}]}]} - - self.assertEqual(credential_inst, deploy["admin"]) - self.assertEqual([credential_inst], deploy["users"]) - - def test_get_credentials_error(self): - deploy = objects.Deployment(deployment=self.env) - self.assertRaises(exceptions.RallyException, - deploy.get_credentials_for, "bar") - def test_to_dict(self): env = mock.Mock( status=env_mgr.STATUS.READY, @@ -267,3 +194,23 @@ class DeploymentTestCase(test.TestCase): "https_insecure": False, "region_name": "FooRegionOne"}}, deploy["config"]) + + @mock.patch("rally.common.objects.Deployment.get_all_credentials") + def test_get_credentials_for(self, mock_get_all_credentials): + mock_get_all_credentials.return_value = { + "foo": ["bar"] + } + + deploy = objects.Deployment(deployment=self.env) + + self.assertEqual("bar", 
deploy.get_credentials_for("foo")) + + def test_get_credentials_for_default(self): + deploy = objects.Deployment(deployment=self.env) + creds = deploy.get_credentials_for("default") + self.assertEqual({"admin": None, "users": []}, creds) + + def test_get_credentials_error(self): + deploy = objects.Deployment(deployment=self.env) + self.assertRaises(exceptions.RallyException, + deploy.get_credentials_for, "bar") diff --git a/tests/unit/fakes.py b/tests/unit/fakes.py index f1dd70ae95..2527cd90d1 100644 --- a/tests/unit/fakes.py +++ b/tests/unit/fakes.py @@ -20,13 +20,7 @@ import re import string import uuid -from ceilometerclient import exc as ceilometer_exc -from glanceclient import exc import mock -from neutronclient.common import exceptions as neutron_exceptions -from novaclient import exceptions as nova_exceptions -import six -from swiftclient import exceptions as swift_exceptions from rally import api from rally.common import utils as rally_utils @@ -113,240 +107,6 @@ class FakeResource(object): return self.items[key] -class FakeServer(FakeResource): - def suspend(self): - self.status = "SUSPENDED" - - def lock(self): - setattr(self, "OS-EXT-STS:locked", True) - - def unlock(self): - setattr(self, "OS-EXT-STS:locked", False) - - -class FakeImage(FakeResource): - - def __init__(self, manager=None, id="image-id-0", min_ram=0, - size=0, min_disk=0, status="active", name=None): - super(FakeImage, self).__init__(manager, id=id, name=name) - self.min_ram = min_ram - self.size = size - self.min_disk = min_disk - self.status = status - self.update = mock.MagicMock() - - -class FakeStrategy(FakeResource): - pass - - -class FakeGoal(FakeResource): - pass - - -class FakeMurano(FakeResource): - pass - - -class FakeFloatingIP(FakeResource): - pass - - -class FakeFloatingIPPool(FakeResource): - pass - - -class FakeTenant(FakeResource): - - def __init__(self, manager, name): - super(FakeTenant, self).__init__(manager, name=name) - - -class FakeUser(FakeResource): - pass - - -class FakeService(FakeResource): - pass - - -class FakeNetwork(FakeResource): - pass - - -class FakeFlavor(FakeResource): - - def __init__(self, id="flavor-id-0", manager=None, ram=0, disk=0, vcpus=1, - name="flavor-name-0"): - super(FakeFlavor, self).__init__(manager, id=id) - self.ram = ram - self.disk = disk - self.vcpus = vcpus - self.name = name - - -class FakeKeypair(FakeResource): - pass - - -class FakeStack(FakeResource): - pass - - -class FakeDomain(FakeResource): - pass - - -class FakeQuotas(FakeResource): - pass - - -class FakeSecurityGroup(FakeResource): - - def __init__(self, manager=None, rule_manager=None, id=None, name=None): - super(FakeSecurityGroup, self).__init__(manager, id=id, name=name) - self.rule_manager = rule_manager - - @property - def rules(self): - return [rule for rule in self.rule_manager.list() - if rule.parent_group_id == self.id] - - -class FakeSecurityGroupRule(FakeResource): - def __init__(self, name, **kwargs): - super(FakeSecurityGroupRule, self).__init__(name) - if "cidr" in kwargs: - kwargs["ip_range"] = {"cidr": kwargs["cidr"]} - del kwargs["cidr"] - for key, value in kwargs.items(): - self.items[key] = value - setattr(self, key, value) - - -class FakeMetric(FakeResource): - def __init_(self, manager=None, **kwargs): - super(FakeMetric, self).__init__(manager) - self.metric = kwargs.get("metric_name") - self.optional_args = kwargs.get("optional_args", {}) - - -class FakeAlarm(FakeResource): - def __init__(self, manager=None, **kwargs): - super(FakeAlarm, self).__init__(manager) - 
self.meter_name = kwargs.get("meter_name") - self.threshold = kwargs.get("threshold") - self.state = kwargs.get("state", "fake-alarm-state") - self.alarm_id = kwargs.get("alarm_id", "fake-alarm-id") - self.state = kwargs.get("state", "ok") - self.optional_args = kwargs.get("optional_args", {}) - - -class FakeSample(FakeResource): - def __init__(self, manager=None, **kwargs): - super(FakeSample, self).__init__(manager) - self.counter_name = kwargs.get("counter_name", "fake-counter-name") - self.counter_type = kwargs.get("counter_type", "fake-counter-type") - self.counter_unit = kwargs.get("counter_unit", "fake-counter-unit") - self.counter_volume = kwargs.get("counter_volume", 100) - - @property - def resource_id(self): - return "fake-resource-id" - - def to_dict(self): - return {"counter_name": self.counter_name, - "counter_type": self.counter_type, - "counter_unit": self.counter_unit, - "counter_volume": self.counter_volume, - "resource_id": self.resource_id} - - -class FakeVolume(FakeResource): - @property - def _info(self): - return {"id": "uuid"} - - -class FakeVolumeType(FakeResource): - pass - - -class FakeVolumeTransfer(FakeResource): - pass - - -class FakeVolumeSnapshot(FakeResource): - pass - - -class FakeVolumeBackup(FakeResource): - pass - - -class FakeRole(FakeResource): - pass - - -class FakeQueue(FakeResource): - def __init__(self, manager=None, name="myqueue"): - super(FakeQueue, self).__init__(manager, name) - self.queue_name = name - self.messages = FakeMessagesManager(name) - - def post(self, messages): - for msg in messages: - self.messages.create(**msg) - - def messages(self): - return self.messages.list() - - -class FakeDbInstance(FakeResource): - pass - - -class FakeMessage(FakeResource): - def __init__(self, manager=None, **kwargs): - super(FakeMessage, self).__init__(manager) - self.body = kwargs.get("body", "fake-body") - self.ttl = kwargs.get("ttl", 100) - - -class FakeAvailabilityZone(FakeResource): - def __init__(self, manager=None): - super(FakeAvailabilityZone, self).__init__(manager) - self.zoneName = mock.MagicMock() - self.zoneState = mock.MagicMock() - self.hosts = mock.MagicMock() - - -class FakeWorkbook(FakeResource): - def __init__(self, manager=None): - super(FakeWorkbook, self).__init__(manager) - self.workbook = mock.MagicMock() - - -class FakeWorkflow(FakeResource): - def __init__(self, manager=None): - super(FakeWorkflow, self).__init__(manager) - self.workflow = mock.MagicMock() - - -class FakeExecution(FakeResource): - def __init__(self, manager=None): - super(FakeExecution, self).__init__(manager) - self.execution = mock.MagicMock() - - -class FakeObject(FakeResource): - pass - - -class FakeClusterTemplate(FakeResource): - pass - - class FakeManager(object): def __init__(self): @@ -383,1326 +143,6 @@ class FakeManager(object): return resource -class FakeServerManager(FakeManager): - - def __init__(self, image_mgr=None): - super(FakeServerManager, self).__init__() - self.images = image_mgr or FakeImageManager() - - def get(self, resource_uuid): - server = self.cache.get(resource_uuid) - if server is not None: - return server - raise nova_exceptions.NotFound("Server %s not found" % (resource_uuid)) - - def _create(self, server_class=FakeServer, name=None): - server = self._cache(server_class(self)) - if name is not None: - server.name = name - return server - - def create(self, name, image_id, flavor_id, **kwargs): - return self._create(name=name) - - def create_image(self, server, name): - image = self.images._create() - return image.uuid - - def 
add_floating_ip(self, server, fip): - pass - - def remove_floating_ip(self, server, fip): - pass - - def delete(self, resource): - if not isinstance(resource, six.string_types): - resource = resource.id - - cached = self.get(resource) - if cached is not None: - cached.status = "DELETED" - del self.cache[resource] - self.resources_order.remove(resource) - - -class FakeImageManager(FakeManager): - - def __init__(self): - super(FakeImageManager, self).__init__() - - def get(self, resource_uuid): - image = self.cache.get(resource_uuid) - if image is not None: - return image - raise exc.HTTPNotFound("Image %s not found" % (resource_uuid)) - - def _create(self, image_class=FakeImage, name=None, id=None): - image = self._cache(image_class(self)) - image.owner = "dummy" - image.id = image.uuid - if name is not None: - image.name = name - return image - - def create(self, name, copy_from, container_format, disk_format): - return self._create(name=name) - - def delete(self, resource): - if not isinstance(resource, six.string_types): - resource = resource.id - - cached = self.get(resource) - if cached is not None: - cached.status = "DELETED" - del self.cache[resource] - self.resources_order.remove(resource) - - -class FakeStrategyManager(FakeManager): - def get(self, resource_name): - for key in self.resources_order: - if self.cache[key].name == resource_name: - return self.cache[key] - - -class FakeGoalManager(FakeManager): - def get(self, resource_name): - for key in self.resources_order: - if self.cache[key].name == resource_name: - return self.cache[key] - - -class FakePackageManager(FakeManager): - - def create(self, package_descr, package_arch, package_class=FakeMurano): - package = self._cache(package_class(self)) - package.name = list(package_arch.keys())[0] - return package - - -class FakeFloatingIPsManager(FakeManager): - - def create(self): - return FakeFloatingIP(self) - - -class FakeFloatingIPPoolsManager(FakeManager): - - def create(self): - return FakeFloatingIPPool(self) - - -class FakeTenantsManager(FakeManager): - - def create(self, name): - return self._cache(FakeTenant(self, name)) - - def update(self, tenant_id, name=None, description=None): - tenant = self.get(tenant_id) - name = name or (tenant.name + "_updated") - desc = description or (tenant.name + "_description_updated") - tenant.name = name - tenant.description = desc - return self._cache(tenant) - - -class FakeNetworkManager(FakeManager): - - def create(self, net_id): - net = FakeNetwork(self) - net.id = net_id - return self._cache(net) - - -class FakeFlavorManager(FakeManager): - - def create(self): - flv = FakeFlavor(self) - return self._cache(flv) - - -class FakeKeypairManager(FakeManager): - - def create(self, name, public_key=None): - kp = FakeKeypair(self) - kp.name = name or kp.name - return self._cache(kp) - - def delete(self, resource): - if not isinstance(resource, six.string_types): - resource = resource.id - - cached = self.get(resource) - if cached is not None: - cached.status = "DELETED" - del self.cache[resource] - self.resources_order.remove(resource) - - -class FakeClusterTemplateManager(FakeManager): - - def create(self, name): - cluster_template = FakeClusterTemplate(self) - cluster_template.name = name or cluster_template.name - return self._cache(cluster_template) - - def delete(self, resource): - if not isinstance(resource, six.string_types): - resource = resource.id - - cached = self.get(resource) - if cached is not None: - del self.cache[resource] - self.resources_order.remove(resource) - - -class 
FakeStackManager(FakeManager): - - def create(self, name): - stack = FakeStack(self) - stack.name = name or stack.name - return self._cache(stack) - - def delete(self, resource): - if not isinstance(resource, six.string_types): - resource = resource.id - - cached = self.get(resource) - if cached is not None: - cached.status = "DELETE_COMPLETE" - del self.cache[resource] - self.resources_order.remove(resource) - - -class FakeDomainManager(FakeManager): - - def create(self, name): - domain = FakeDomain(self) - domain.name = name or domain.name - return self._cache(domain) - - def delete(self, resource): - if not isinstance(resource, six.string_types): - resource = resource.id - - cached = self.get(resource) - if cached is not None: - cached.status = "DELETE_COMPLETE" - del self.cache[resource] - self.resources_order.remove(resource) - - -class FakeNovaQuotasManager(FakeManager): - - def update(self, tenant_id, **kwargs): - fq = FakeQuotas(self) - return self._cache(fq) - - def delete(self, tenant_id): - pass - - -class FakeCinderQuotasManager(FakeManager): - - def update(self, tenant_id, **kwargs): - fq = FakeQuotas(self) - return self._cache(fq) - - def delete(self, tenant_id): - pass - - -class FakeSecurityGroupManager(FakeManager): - def __init__(self, rule_manager=None): - super(FakeSecurityGroupManager, self).__init__() - self.rule_manager = rule_manager - self.create("default") - - def create(self, name, description=""): - sg = FakeSecurityGroup( - manager=self, - rule_manager=self.rule_manager) - sg.name = name or sg.name - sg.description = description - return self._cache(sg) - - def to_dict(self, obj): - return {"id": obj.id, "name": obj.name} - - def find(self, name, **kwargs): - kwargs["name"] = name - for resource in self.cache.values(): - match = True - for key, value in kwargs.items(): - if getattr(resource, key, None) != value: - match = False - break - if match: - return resource - raise nova_exceptions.NotFound("Security Group not found") - - def delete(self, resource): - if not isinstance(resource, six.string_types): - resource = resource.id - - cached = self.get(resource) - if cached is not None: - cached.status = "DELETED" - del self.cache[resource] - self.resources_order.remove(resource) - - -class FakeSecurityGroupRuleManager(FakeManager): - def __init__(self): - super(FakeSecurityGroupRuleManager, self).__init__() - - def create(self, parent_group_id, **kwargs): - kwargs["parent_group_id"] = parent_group_id - sgr = FakeSecurityGroupRule(self, **kwargs) - return self._cache(sgr) - - -class FakeUsersManager(FakeManager): - - def create(self, username, password, email, tenant_id): - user = FakeUser(manager=self, name=username) - user.name = username or user.name - return self._cache(user) - - -class FakeServicesManager(FakeManager): - - def list(self): - return [] - - -class FakeVolumeManager(FakeManager): - def __init__(self): - super(FakeVolumeManager, self).__init__() - self.__volumes = {} - self.__tenant_id = generate_uuid() - - def create(self, size=None, **kwargs): - volume = FakeVolume(self) - volume.size = size or 1 - volume.name = kwargs.get("display_name", volume.name) - volume.status = "available" - volume.tenant_id = self.__tenant_id - self.__volumes[volume.id] = volume - return self._cache(volume) - - def list(self): - return self.__volumes.values() - - def delete(self, resource): - super(FakeVolumeManager, self).delete(resource.id) - del self.__volumes[resource.id] - - -class FakeVolumeTypeManager(FakeManager): - - def create(self, name): - vol_type = 
FakeVolumeType(self) - vol_type.name = name or vol_type.name - return self._cache(vol_type) - - -class FakeVolumeTransferManager(FakeManager): - def __init__(self): - super(FakeVolumeTransferManager, self).__init__() - self.__volume_transfers = {} - - def list(self): - return self.__volume_transfers.values() - - def create(self, name): - transfer = FakeVolumeTransfer(self) - transfer.name = name or transfer.name - self.__volume_transfers[transfer.id] = transfer - return self._cache(transfer) - - def delete(self, resource): - super(FakeVolumeTransferManager, self).delete(resource.id) - del self.__volume_transfers[resource.id] - - -class FakeVolumeSnapshotManager(FakeManager): - def __init__(self): - super(FakeVolumeSnapshotManager, self).__init__() - self.__snapshots = {} - self.__tenant_id = generate_uuid() - - def create(self, name, force=False, display_name=None): - snapshot = FakeVolumeSnapshot(self) - snapshot.name = name or snapshot.name - snapshot.status = "available" - snapshot.tenant_id = self.__tenant_id - self.__snapshots[snapshot.id] = snapshot - return self._cache(snapshot) - - def list(self): - return self.__snapshots.values() - - def delete(self, resource): - super(FakeVolumeSnapshotManager, self).delete(resource.id) - del self.__snapshots[resource.id] - - -class FakeVolumeBackupManager(FakeManager): - def __init__(self): - super(FakeVolumeBackupManager, self).__init__() - self.__backups = {} - self.__tenant_id = generate_uuid() - - def create(self, name): - backup = FakeVolumeBackup(self) - backup.name = name or backup.name - self.__backups[backup.id] = backup - return self._cache(backup) - - def list(self): - return self.__backups.values() - - def delete(self, resource): - super(FakeVolumeBackupManager, self).delete(resource.id) - del self.__backups[resource.id] - - -class FakeRolesManager(FakeManager): - - def create(self, role_id, name): - role = FakeRole(self) - role.name = name - role.id = role_id - return self._cache(role) - - def roles_for_user(self, user, tenant): - role = FakeRole(self) - role.name = "admin" - return [role, ] - - def add_user_role(self, user, role, tenant): - pass - - -class FakeMetricManager(FakeManager): - - def create(self, **kwargs): - metric = FakeMetric(self, **kwargs) - return self._cache(metric) - - def get(self, metric_id): - metric = self.find(metric_id=metric_id) - return [metric] - - -class FakeAlarmManager(FakeManager): - - def get(self, alarm_id): - alarm = self.find(alarm_id=alarm_id) - if alarm: - return [alarm] - raise ceilometer_exc.HTTPNotFound( - "Alarm with %s not found" % (alarm_id)) - - def update(self, alarm_id, **fake_alarm_dict_diff): - alarm = self.get(alarm_id)[0] - for attr, value in fake_alarm_dict_diff.items(): - setattr(alarm, attr, value) - return alarm - - def create(self, **kwargs): - alarm = FakeAlarm(self, **kwargs) - return self._cache(alarm) - - def delete(self, alarm_id): - alarm = self.find(alarm_id=alarm_id) - if alarm is not None: - alarm.status = "DELETED" - del self.cache[alarm.id] - self.resources_order.remove(alarm.id) - - def get_state(self, alarm_id): - alarm = self.find(alarm_id=alarm_id) - if alarm is not None: - return getattr(alarm, "state", "fake-alarm-state") - - def get_history(self, alarm_id): - return ["fake-alarm-history"] - - def set_state(self, alarm_id, state): - alarm = self.find(alarm_id=alarm_id) - if alarm is not None: - return setattr(alarm, "state", state) - - -class FakeSampleManager(FakeManager): - - def create(self, **kwargs): - sample = FakeSample(self, **kwargs) - return 
[self._cache(sample)] - - def list(self): - return ["fake-samples"] - - -class FakeMeterManager(FakeManager): - - def list(self): - return ["fake-meter"] - - -class FakeMetricsManager(FakeManager): - - def list(self): - return ["fake-metric"] - - -class FakeCeilometerResourceManager(FakeManager): - - def get(self, resource_id): - return ["fake-resource-info"] - - def list(self): - return ["fake-resource"] - - -class FakeStatisticsManager(FakeManager): - - def list(self, meter): - return ["%s-statistics" % meter] - - -class FakeQueryManager(FakeManager): - - def query(self, filter, orderby, limit): - return ["fake-query-result"] - - -class FakeQueuesManager(FakeManager): - def __init__(self): - super(FakeQueuesManager, self).__init__() - self.__queues = {} - - def create(self, name): - queue = FakeQueue(self, name) - self.__queues[queue.name] = queue - return self._cache(queue) - - def list(self): - return self.__queues.values() - - def delete(self, queue): - super(FakeQueuesManager, self).delete(queue.name) - del self.__queues[queue.name] - - -class FakeDbInstanceManager(FakeManager): - def __init__(self): - super(FakeDbInstanceManager, self).__init__() - self.__db_instances = {} - - def create(self, name, flavor_id, size): - instance = FakeDbInstance(self) - instance.name = name or instance.name - instance.flavor_id = flavor_id - instance.size = size - return self._cache(instance) - - def list(self): - return self.__db_instances.values() - - def delete(self, resource): - if not isinstance(resource, six.string_types): - resource = resource.id - - cached = self.get(resource) - if cached is not None: - cached.status = "DELETE_COMPLETE" - del self.cache[resource] - self.resources_order.remove(resource) - - -class FakeMessagesManager(FakeManager): - def __init__(self, queue="myqueue"): - super(FakeMessagesManager, self).__init__() - self.__queue = queue - self.__messages = {} - - def create(self, **kwargs): - message = FakeMessage(self, **kwargs) - self.__messages[message.id] = message - return self._cache(message) - - def list(self): - return self.__messages.values() - - def delete(self, message): - super(FakeMessagesManager, self).delete(message.id) - del self.__messages[message.id] - - -class FakeAvailabilityZonesManager(FakeManager): - def __init__(self): - super(FakeAvailabilityZonesManager, self).__init__() - self.zones = FakeAvailabilityZone() - - def list(self): - return [self.zones] - - -class FakeWorkbookManager(FakeManager): - def __init__(self): - super(FakeWorkbookManager, self).__init__() - self.workbook = FakeWorkbook() - - def list(self): - return [self.workbook] - - -class FakeWorkflowManager(FakeManager): - def __init__(self): - super(FakeWorkflowManager, self).__init__() - self.workflow = FakeWorkflow() - - def list(self): - return [self.workflow] - - -class FakeExecutionManager(FakeManager): - def __init__(self): - super(FakeExecutionManager, self).__init__() - self.execution = FakeExecution() - - def list(self): - return [self.execution] - - def create(self): - return self.execution - - -class FakeObjectManager(FakeManager): - - def get_account(self, **kwargs): - containers = self.list() - return (mock.MagicMock(), [{"name": con.name} for con in containers]) - - def get_container(self, name, **kwargs): - container = self.find(name=name) - if container is None: - raise swift_exceptions.ClientException("Container GET failed") - return (mock.MagicMock(), [{"name": obj} for obj in container.items]) - - def put_container(self, name, **kwargs): - if self.find(name=name): - raise 
swift_exceptions.ClientException("Container PUT failed") - self._cache(FakeObject(name=name)) - - def delete_container(self, name, **kwargs): - container = self.find(name=name) - if container is None or len(container.items.keys()) > 0: - raise swift_exceptions.ClientException("Container DELETE failed") - self.delete(container.uuid) - - def get_object(self, container_name, object_name, **kwargs): - container = self.find(name=container_name) - if container is None or object_name not in container.items: - raise swift_exceptions.ClientException("Object GET failed") - return (mock.MagicMock(), container.items[object_name]) - - def put_object(self, container_name, object_name, content, **kwargs): - container = self.find(name=container_name) - if container is None: - raise swift_exceptions.ClientException("Object PUT failed") - container.items[object_name] = content - return mock.MagicMock() - - def delete_object(self, container_name, object_name, **kwargs): - container = self.find(name=container_name) - if container is None or object_name not in container.items: - raise swift_exceptions.ClientException("Object DELETE failed") - del container.items[object_name] - - -class FakeServiceCatalog(object): - def get_credentials(self): - return {"image": [{"publicURL": "http://fake.to"}], - "metering": [{"publicURL": "http://fake.to"}], - "monitoring": [{"publicURL": "http://fake.to"}]} - - def url_for(self, **kwargs): - return "http://fake.to" - - -class FakeGlanceClient(object): - - def __init__(self, version="1"): - self.images = FakeImageManager() - self.version = version - - -class FakeMuranoClient(object): - - def __init__(self): - self.packages = FakePackageManager() - - -class FakeCinderClient(object): - - def __init__(self): - self.volumes = FakeVolumeManager() - self.volume_types = FakeVolumeTypeManager() - self.transfers = FakeVolumeTransferManager() - self.volume_snapshots = FakeVolumeSnapshotManager() - self.backups = FakeVolumeBackupManager() - self.quotas = FakeCinderQuotasManager() - - -class FakeNovaClient(object): - - def __init__(self, failed_server_manager=False): - self.images = FakeImageManager() - self.servers = FakeServerManager(self.images) - self.floating_ips = FakeFloatingIPsManager() - self.floating_ip_pools = FakeFloatingIPPoolsManager() - self.networks = FakeNetworkManager() - self.flavors = FakeFlavorManager() - self.keypairs = FakeKeypairManager() - self.security_group_rules = FakeSecurityGroupRuleManager() - self.security_groups = FakeSecurityGroupManager( - rule_manager=self.security_group_rules) - self.quotas = FakeNovaQuotasManager() - self.set_management_url = mock.MagicMock() - self.availability_zones = FakeAvailabilityZonesManager() - - -class FakeHeatClient(object): - - def __init__(self): - self.stacks = FakeStackManager() - - -class FakeDesignateClient(object): - - def __init__(self): - self.domains = FakeDomainManager() - - -class FakeKeystoneClient(object): - - def __init__(self): - self.tenants = FakeTenantsManager() - self.users = FakeUsersManager() - self.roles = FakeRolesManager() - self.project_id = "abc123" - self.auth_url = "http://example.com:5000/v2.0/" - self.auth_token = "fake" - self.auth_user_id = generate_uuid() - self.auth_tenant_id = generate_uuid() - self.service_catalog = FakeServiceCatalog() - self.services = FakeServicesManager() - self.region_name = "RegionOne" - self.auth_ref = mock.Mock() - self.auth_ref.role_names = ["admin"] - self.version = "v2.0" - self.session = mock.MagicMock() - self.authenticate = mock.MagicMock() - - def 
authenticate(self): - return True - - def list_users(self): - return self.users.list() - - def list_projects(self): - return self.tenants.list() - - def list_services(self): - return self.services.list() - - def list_roles(self): - return self.roles.list() - - def delete_user(self, uuid): - return self.users.delete(uuid) - - -class FakeCeilometerClient(object): - - def __init__(self): - self.alarms = FakeAlarmManager() - self.meters = FakeMeterManager() - self.resources = FakeCeilometerResourceManager() - self.statistics = FakeStatisticsManager() - self.samples = FakeSampleManager() - self.query_alarms = FakeQueryManager() - self.query_samples = FakeQueryManager() - self.query_alarm_history = FakeQueryManager() - - -class FakeGnocchiClient(object): - def __init__(self): - self.metric = FakeMetricManager() - - -class FakeMonascaClient(object): - - def __init__(self): - self.metrics = FakeMetricsManager() - - -class FakeNeutronClient(object): - - def __init__(self, **kwargs): - self.__networks = {} - self.__subnets = {} - self.__routers = {} - self.__ports = {} - self.__pools = {} - self.__vips = {} - self.__fips = {} - self.__healthmonitors = {} - self.__tenant_id = kwargs.get("tenant_id", generate_uuid()) - - self.format = "json" - self.version = "2.0" - - @staticmethod - def _filter(resource_list, search_opts): - return [res for res in resource_list - if all(res[field] == value - for field, value in search_opts.items())] - - def add_interface_router(self, router_id, data): - subnet_id = data["subnet_id"] - - if (router_id not in self.__routers or - subnet_id not in self.__subnets): - raise neutron_exceptions.NeutronClientException - - subnet = self.__subnets[subnet_id] - - port = self.create_port( - {"port": {"network_id": subnet["network_id"]}})["port"] - port["device_id"] = router_id - port["fixed_ips"].append({"subnet_id": subnet_id, - "ip_address": subnet["gateway_ip"]}) - - return {"subnet_id": subnet_id, - "tenant_id": port["tenant_id"], - "port_id": port["id"], - "id": router_id} - - def create_network(self, data): - network = setup_dict(data["network"], - defaults={"name": generate_name("net_"), - "admin_state_up": True}) - network_id = generate_uuid() - network.update({"id": network_id, - "status": "ACTIVE", - "subnets": [], - "provider:physical_network": None, - "tenant_id": self.__tenant_id, - "provider:network_type": "local", - "router:external": True, - "shared": False, - "provider:segmentation_id": None}) - self.__networks[network_id] = network - return {"network": network} - - def create_pool(self, data): - pool = setup_dict(data["pool"], - required=["lb_method", "protocol", "subnet_id"], - defaults={"name": generate_name("pool_"), - "admin_state_up": True}) - if pool["subnet_id"] not in self.__subnets: - raise neutron_exceptions.NeutronClientException - pool_id = generate_uuid() - - pool.update({"id": pool_id, - "status": "PENDING_CREATE", - "tenant_id": self.__tenant_id}) - self.__pools[pool_id] = pool - return {"pool": pool} - - def create_vip(self, data): - vip = setup_dict(data["vip"], - required=["protocol_port", "protocol", "subnet_id", - "pool_id"], - defaults={"name": generate_name("vip_"), - "admin_state_up": True}) - if (vip["subnet_id"] not in self.__subnets) or (vip["pool_id"] not in - self.__pools): - raise neutron_exceptions.NeutronClientException - vip_id = generate_uuid() - - vip.update({"id": vip_id, - "status": "PENDING_CREATE", - "tenant_id": self.__tenant_id}) - self.__vips[vip_id] = vip - return {"vip": vip} - - def create_floatingip(self, data): - fip 
= setup_dict(data["floatingip"], - required=["floating_network"], - defaults={"admin_state_up": True}) - if (fip["floating_network"] not in self.__nets): - raise neutron_exceptions.NeutronClientException - fip_id = generate_uuid() - - fip.update({"id": fip_id, - "tenant_id": self.__tenant_id}) - self.__fips[fip_id] = fip - return {"fip": fip} - - def create_health_monitor(self, data): - healthmonitor = setup_dict(data["healthmonitor"], - required=["type", "timeout", "delay", - "max_retries"], - defaults={"admin_state_up": True}) - healthmonitor_id = generate_uuid() - - healthmonitor.update({"id": healthmonitor_id, - "status": "PENDING_CREATE", - "tenant_id": self.__tenant_id}) - self.__healthmonitors[healthmonitor_id] = healthmonitor - return {"healthmonitor": healthmonitor} - - def create_port(self, data): - port = setup_dict(data["port"], - required=["network_id"], - defaults={"name": generate_name("port_"), - "admin_state_up": True}) - if port["network_id"] not in self.__networks: - raise neutron_exceptions.NeutronClientException - - port_id = generate_uuid() - port.update({"id": port_id, - "status": "ACTIVE", - "binding:host_id": "fakehost", - "extra_dhcp_opts": [], - "binding:vnic_type": "normal", - "binding:vif_type": "ovs", - "device_owner": "", - "mac_address": generate_mac(), - "binding:profile": {}, - "binding:vif_details": {u"port_filter": True}, - "security_groups": [], - "fixed_ips": [], - "device_id": "", - "tenant_id": self.__tenant_id, - "allowed_address_pairs": []}) - self.__ports[port_id] = port - return {"port": port} - - def create_router(self, data): - router = setup_dict(data["router"], - defaults={"name": generate_name("router_"), - "external_gateway_info": None, - "admin_state_up": True}) - router_id = generate_uuid() - router.update({"id": router_id, - "status": "ACTIVE", - "external_gateway_info": None, - "tenant_id": self.__tenant_id}) - self.__routers[router_id] = router - return {"router": router} - - def create_subnet(self, data): - subnet = setup_dict( - data["subnet"], - required=["network_id", "cidr", "ip_version"], - defaults={"name": generate_name("subnet_"), - "dns_nameservers": ["8.8.8.8", "8.8.4.4"]}) - if subnet["network_id"] not in self.__networks: - raise neutron_exceptions.NeutronClientException - - subnet_id = generate_uuid() - subnet.update({"id": subnet_id, - "enable_dhcp": True, - "tenant_id": self.__tenant_id, - "ipv6_ra_mode": None, - "allocation_pools": [], - "gateway_ip": re.sub("./.*$", "1", subnet["cidr"]), - "ipv6_address_mode": None, - "ip_version": 4, - "host_routes": []}) - self.__subnets[subnet_id] = subnet - return {"subnet": subnet} - - def update_resource(self, resource_id, resource_dict, data): - if resource_id not in resource_dict: - raise neutron_exceptions.NeutronClientException - self.resource_list[resource_id].update(data) - - def update_network(self, network_id, data): - self.update_resource(network_id, self.__networks, data) - - def update_pool(self, pool_id, data): - self.update_resource(pool_id, self.__pools, data) - - def update_vip(self, vip_id, data): - self.update_resource(vip_id, self.__vips, data) - - def update_health_monitor(self, healthmonitor_id, data): - self.update_resource(healthmonitor_id, self.__healthmonitors, data) - - def update_subnet(self, subnet_id, data): - self.update_resource(subnet_id, self.__subnets, data) - - def update_port(self, port_id, data): - self.update_resource(port_id, self.__ports, data) - - def update_router(self, router_id, data): - self.update_resource(router_id, self.__routers, 
data) - - def delete_network(self, network_id): - if network_id not in self.__networks: - raise neutron_exceptions.NeutronClientException - for port in self.__ports.values(): - if port["network_id"] == network_id: - # Network is in use by port - raise neutron_exceptions.NeutronClientException - del self.__networks[network_id] - return "" - - def delete_pool(self, pool_id): - if pool_id not in self.__pools: - raise neutron_exceptions.NeutronClientException - del self.__pools[pool_id] - return "" - - def delete_vip(self, vip_id): - if vip_id not in self.__vips: - raise neutron_exceptions.NeutronClientException - del self.__vips[vip_id] - - def delete_health_monitor(self, healthmonitor_id): - if healthmonitor_id not in self.__healthmonitors: - raise neutron_exceptions.NeutronClientException - del self.__healthmonitors[healthmonitor_id] - return "" - - def delete_floatingip(self, fip_id): - if fip_id not in self.__fips: - raise neutron_exceptions.NeutronClientException - del self.__fips[fip_id] - return "" - - def delete_port(self, port_id): - if port_id not in self.__ports: - raise neutron_exceptions.PortNotFoundClient - if self.__ports[port_id]["device_owner"]: - # Port is owned by some device - raise neutron_exceptions.NeutronClientException - del self.__ports[port_id] - return "" - - def delete_router(self, router_id): - if router_id not in self.__routers: - raise neutron_exceptions.NeutronClientException - for port in self.__ports.values(): - if port["device_id"] == router_id: - # Router has active port - raise neutron_exceptions.NeutronClientException - del self.__routers[router_id] - return "" - - def delete_subnet(self, subnet_id): - if subnet_id not in self.__subnets: - raise neutron_exceptions.NeutronClientException - for port in self.__ports.values(): - for fip in port["fixed_ips"]: - if fip["subnet_id"] == subnet_id: - # Subnet has IP allocation from some port - raise neutron_exceptions.NeutronClientException - del self.__subnets[subnet_id] - return "" - - def list_networks(self, **search_opts): - nets = self._filter(self.__networks.values(), search_opts) - return {"networks": nets} - - def list_pools(self, **search_opts): - pools = self._filter(self.__pools.values(), search_opts) - return {"pools": pools} - - def list_vips(self, **search_opts): - vips = self._filter(self.__vips.values(), search_opts) - return {"vips": vips} - - def list_health_monitors(self, **search_opts): - healthmonitors = self._filter( - self.__healthmonitors.values(), search_opts) - return {"healthmonitors": healthmonitors} - - def list_ports(self, **search_opts): - ports = self._filter(self.__ports.values(), search_opts) - return {"ports": ports} - - def list_routers(self, **search_opts): - routers = self._filter(self.__routers.values(), search_opts) - return {"routers": routers} - - def list_subnets(self, **search_opts): - subnets = self._filter(self.__subnets.values(), search_opts) - return {"subnets": subnets} - - def list_floatingips(self, **search_opts): - fips = self._filter(self.__fips.values(), search_opts) - return {"floatingips": fips} - - def remove_interface_router(self, router_id, data): - subnet_id = data["subnet_id"] - - if (router_id not in self.__routers - or subnet_id not in self.__subnets): - raise neutron_exceptions.NeutronClientException - - subnet = self.__subnets[subnet_id] - - for port_id, port in self.__ports.items(): - if port["device_id"] == router_id: - for fip in port["fixed_ips"]: - if fip["subnet_id"] == subnet_id: - del self.__ports[port_id] - return {"subnet_id": subnet_id, - 
"tenant_id": subnet["tenant_id"], - "port_id": port_id, - "id": router_id} - - raise neutron_exceptions.NeutronClientException - - def associate_health_monitor(self, pool_id, healthmonitor_id): - if pool_id not in self.__pools: - raise neutron_exceptions.NeutronClientException - if healthmonitor_id not in self.__healthmonitors: - raise neutron_exceptions.NeutronClientException - self.__pools[pool_id]["pool"]["healthmonitors"] = healthmonitor_id - return {"pool": self.__pools[pool_id]} - - def disassociate_health_monitor(self, pool_id, healthmonitor_id): - if pool_id not in self.__pools: - raise neutron_exceptions.NeutronClientException - if healthmonitor_id not in self.__healthmonitors: - raise neutron_exceptions.NeutronClientException - del self.__pools[pool_id]["pool"]["healthmonitors"][healthmonitor_id] - return "" - - -class FakeIronicClient(object): - - def __init__(self): - # TODO(romcheg):Fake Manager subclasses to manage BM nodes. - pass - - -class FakeSaharaClient(object): - - def __init__(self): - self.job_executions = mock.MagicMock() - self.jobs = mock.MagicMock() - self.job_binary_internals = mock.MagicMock() - self.job_binaries = mock.MagicMock() - self.data_sources = mock.MagicMock() - - self.clusters = mock.MagicMock() - self.cluster_templates = mock.MagicMock() - self.node_group_templates = mock.MagicMock() - - self.setup_list_methods() - - def setup_list_methods(self): - mock_with_id = mock.MagicMock() - mock_with_id.id = 42 - - # First call of list returns a list with one object, the next should - # empty after delete. - self.job_executions.list.side_effect = [[mock_with_id], []] - self.jobs.list.side_effect = [[mock_with_id], []] - self.job_binary_internals.list.side_effect = [[mock_with_id], []] - self.job_binaries.list.side_effect = [[mock_with_id], []] - self.data_sources.list.side_effect = [[mock_with_id], []] - - self.clusters.list.side_effect = [[mock_with_id], []] - self.cluster_templates.list.side_effect = [[mock_with_id], []] - self.node_group_templates.list.side_effect = [[mock_with_id], []] - - -class FakeZaqarClient(object): - - def __init__(self): - self.queues = FakeQueuesManager() - - def queue(self, name, **kwargs): - return self.queues.create(name, **kwargs) - - -class FakeTroveClient(object): - - def __init__(self): - self.instances = FakeDbInstanceManager() - - -class FakeMistralClient(object): - - def __init__(self): - self.workbook = FakeWorkbookManager() - self.workflow = FakeWorkflowManager() - self.execution = FakeExecutionManager() - - -class FakeSwiftClient(FakeObjectManager): - pass - - -class FakeEC2Client(object): - - def __init__(self): - pass - - -class FakeSenlinClient(object): - - def __init__(self): - # TODO(Yanyan Hu):Fake interfaces of senlinclient. 
- pass - - -class FakeMagnumClient(object): - - def __init__(self): - self.cluster_templates = FakeClusterTemplateManager() - - -class FakeWatcherClient(object): - - def __init__(self): - self.strategy = FakeStrategyManager() - self.goal = FakeGoalManager() - - -class FakeClients(object): - - def __init__(self, credential_=None): - self._nova = None - self._glance = None - self._keystone = None - self._cinder = None - self._neutron = None - self._sahara = None - self._heat = None - self._designate = None - self._ceilometer = None - self._zaqar = None - self._trove = None - self._mistral = None - self._swift = None - self._murano = None - self._monasca = None - self._ec2 = None - self._senlin = None - self._watcher = None - self._credential = credential_ or fake_credential( - auth_url="http://fake.example.org:5000/v2.0/", - username="fake_username", - password="fake_password", - tenant_name="fake_tenant_name") - - def keystone(self, version=None): - if not self._keystone: - self._keystone = FakeKeystoneClient() - return self._keystone - - def verified_keystone(self): - return self.keystone() - - def nova(self): - if not self._nova: - self._nova = FakeNovaClient() - return self._nova - - def glance(self, version="1"): - if not self._glance: - self._glance = FakeGlanceClient(version) - return self._glance - - def cinder(self): - if not self._cinder: - self._cinder = FakeCinderClient() - return self._cinder - - def neutron(self): - if not self._neutron: - self._neutron = FakeNeutronClient() - return self._neutron - - def sahara(self): - if not self._sahara: - self._sahara = FakeSaharaClient() - return self._sahara - - def heat(self): - if not self._heat: - self._heat = FakeHeatClient() - return self._heat - - def designate(self): - if not self._designate: - self._designate = FakeDesignateClient() - return self._designate - - def ceilometer(self): - if not self._ceilometer: - self._ceilometer = FakeCeilometerClient() - return self._ceilometer - - def monasca(self): - if not self._monasca: - self._monasca = FakeMonascaClient() - return self._monasca - - def zaqar(self): - if not self._zaqar: - self._zaqar = FakeZaqarClient() - return self._zaqar - - def trove(self): - if not self._trove: - self._trove = FakeTroveClient() - return self._trove - - def mistral(self): - if not self._mistral: - self._mistral = FakeMistralClient() - return self._mistral - - def swift(self): - if not self._swift: - self._swift = FakeSwiftClient() - return self._swift - - def murano(self): - if not self._murano: - self._murano = FakeMuranoClient() - return self._murano - - def ec2(self): - if not self._ec2: - self._ec2 = FakeEC2Client() - return self._ec2 - - def senlin(self): - if not self._senlin: - self._senlin = FakeSenlinClient() - return self._senlin - - def watcher(self): - if not self._watcher: - self._watcher = FakeWatcherClient() - return self._watcher - - class FakeRunner(object): CONFIG_SCHEMA = { @@ -1810,37 +250,6 @@ class FakeHiddenContext(FakeContext): pass -@context.configure(name="fake_user_context", order=1) -class FakeUserContext(FakeContext): - - admin = { - "id": "adminuuid", - "credential": fake_credential( - auth_url="aurl", - username="aname", - password="apwd", - tenant_name="atenant") - } - user = { - "id": "uuid", - "credential": fake_credential( - auth_url="url", - username="name", - password="pwd", - tenant_name="tenant"), - "tenant_id": "uuid" - } - tenants = {"uuid": {"name": "tenant"}} - - def __init__(self, ctx): - super(FakeUserContext, self).__init__(ctx) - 
self.context.setdefault("admin", FakeUserContext.admin) - self.context.setdefault("users", [FakeUserContext.user]) - self.context.setdefault("tenants", FakeUserContext.tenants) - self.context.setdefault( - "scenario_name", "NovaServers.boot_server_from_volume_and_delete") - - class FakeDeployment(dict): def __init__(self, **kwargs): diff --git a/tests/unit/plugins/common/runners/test_rps.py b/tests/unit/plugins/common/runners/test_rps.py index 579b7a9369..bf99a89086 100644 --- a/tests/unit/plugins/common/runners/test_rps.py +++ b/tests/unit/plugins/common/runners/test_rps.py @@ -291,7 +291,7 @@ class RPSScenarioRunnerTestCase(test.TestCase): runner_obj.abort() runner_obj._run_scenario(fakes.FakeScenario, "do_it", - fakes.FakeUser().context, {}) + {}, {}) self.assertEqual(0, len(runner_obj.result_queue)) @@ -381,8 +381,7 @@ class RPSScenarioRunnerTestCase(test.TestCase): runner_obj = rps.RPSScenarioRunner(self.task, sample["input"]) - runner_obj._run_scenario(fakes.FakeScenario, "do_it", - fakes.FakeUser().context, {}) + runner_obj._run_scenario(fakes.FakeScenario, "do_it", {}, {}) mock_cpu_count.assert_called_once_with() mock__log_debug_info.assert_called_once_with( diff --git a/tests/unit/plugins/openstack/__init__.py b/tests/unit/plugins/openstack/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/cleanup/__init__.py b/tests/unit/plugins/openstack/cleanup/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/cleanup/test_base.py b/tests/unit/plugins/openstack/cleanup/test_base.py deleted file mode 100644 index aa451f462f..0000000000 --- a/tests/unit/plugins/openstack/cleanup/test_base.py +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from rally.plugins.openstack.cleanup import base -from tests.unit import test - - -BASE = "rally.plugins.openstack.cleanup.base" - - -class ResourceDecoratorTestCase(test.TestCase): - - def test_resource(self): - - @base.resource("service", "res") - class Fake(object): - pass - - self.assertEqual("service", Fake._service) - self.assertEqual("res", Fake._resource) - - -class ResourceManagerTestCase(test.TestCase): - - def test__manager(self): - user = mock.MagicMock() - user.service1().resource1 = "user_res" - - manager = base.ResourceManager(user=user) - manager._service = "service1" - manager._resource = "resource1" - - self.assertEqual("user_res", manager._manager()) - - def test__manager_admin(self): - admin = mock.MagicMock() - admin.service1().resource1 = "admin_res" - - manager = base.ResourceManager(admin=admin) - manager._service = "service1" - manager._resource = "resource1" - manager._admin_required = True - - self.assertEqual("admin_res", manager._manager()) - - def test_id(self): - resource = mock.MagicMock(id="test_id") - - manager = base.ResourceManager(resource=resource) - self.assertEqual(resource.id, manager.id()) - - def test_name(self): - resource = mock.MagicMock(name="test_name") - - manager = base.ResourceManager(resource=resource) - self.assertEqual(resource.name, manager.name()) - - @mock.patch("%s.ResourceManager._manager" % BASE) - def test_is_deleted(self, mock_resource_manager__manager): - raw_res = mock.MagicMock(status="deleted") - mock_resource_manager__manager().get.return_value = raw_res - mock_resource_manager__manager.reset_mock() - - resource = mock.MagicMock(id="test_id") - - manager = base.ResourceManager(resource=resource) - self.assertTrue(manager.is_deleted()) - raw_res.status = "DELETE_COMPLETE" - self.assertTrue(manager.is_deleted()) - raw_res.status = "ACTIVE" - self.assertFalse(manager.is_deleted()) - - mock_resource_manager__manager.assert_has_calls( - [mock.call(), mock.call().get(resource.id)] * 3) - self.assertEqual(3, mock_resource_manager__manager.call_count) - - @mock.patch("%s.ResourceManager._manager" % BASE) - def test_is_deleted_exceptions(self, mock_resource_manager__manager): - - class Fake500Exc(Exception): - code = 500 - - class Fake404Exc(Exception): - code = 404 - - mock_resource_manager__manager.side_effect = [ - Exception, Fake500Exc, Fake404Exc] - - manager = base.ResourceManager(resource=mock.MagicMock()) - self.assertFalse(manager.is_deleted()) - self.assertFalse(manager.is_deleted()) - self.assertTrue(manager.is_deleted()) - - @mock.patch("%s.ResourceManager._manager" % BASE) - def test_delete(self, mock_resource_manager__manager): - res = mock.MagicMock(id="test_id") - - manager = base.ResourceManager(resource=res) - manager.delete() - - mock_resource_manager__manager.assert_has_calls( - [mock.call(), mock.call().delete(res.id)]) - - @mock.patch("%s.ResourceManager._manager" % BASE) - def test_list(self, mock_resource_manager__manager): - base.ResourceManager().list() - mock_resource_manager__manager.assert_has_calls( - [mock.call(), mock.call().list()]) diff --git a/tests/unit/plugins/openstack/cleanup/test_manager.py b/tests/unit/plugins/openstack/cleanup/test_manager.py deleted file mode 100644 index bcff72097a..0000000000 --- a/tests/unit/plugins/openstack/cleanup/test_manager.py +++ /dev/null @@ -1,430 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.common import utils -from rally.plugins.openstack.cleanup import base -from rally.plugins.openstack.cleanup import manager -from tests.unit import test - - -BASE = "rally.plugins.openstack.cleanup.manager" - - -class SeekAndDestroyTestCase(test.TestCase): - - def setUp(self): - super(SeekAndDestroyTestCase, self).setUp() - # clear out the client cache - manager.SeekAndDestroy.cache = {} - - def test__get_cached_client(self): - api_versions = {"cinder": {"version": "1", "service_type": "volume"}} - - destroyer = manager.SeekAndDestroy(None, None, None, - api_versions=api_versions) - cred = mock.Mock() - user = {"credential": cred} - - clients = destroyer._get_cached_client(user) - self.assertIs(cred.clients.return_value, clients) - cred.clients.assert_called_once_with(api_info=api_versions) - - self.assertIsNone(destroyer._get_cached_client(None)) - - @mock.patch("%s.LOG" % BASE) - def test__delete_single_resource(self, mock_log): - mock_resource = mock.MagicMock(_max_attempts=3, _timeout=10, - _interval=0.01) - mock_resource.delete.side_effect = [Exception, Exception, True] - mock_resource.is_deleted.side_effect = [False, False, True] - - manager.SeekAndDestroy(None, None, None)._delete_single_resource( - mock_resource) - - mock_resource.delete.assert_has_calls([mock.call()] * 3) - self.assertEqual(3, mock_resource.delete.call_count) - mock_resource.is_deleted.assert_has_calls([mock.call()] * 3) - self.assertEqual(3, mock_resource.is_deleted.call_count) - - # NOTE(boris-42): No logs and no exceptions means no bugs! 
- self.assertEqual(0, mock_log.call_count) - - @mock.patch("%s.LOG" % BASE) - def test__delete_single_resource_timeout(self, mock_log): - - mock_resource = mock.MagicMock(_max_attempts=1, _timeout=0.02, - _interval=0.025) - - mock_resource.delete.return_value = True - mock_resource.is_deleted.side_effect = [False, False, True] - - manager.SeekAndDestroy(None, None, None)._delete_single_resource( - mock_resource) - - mock_resource.delete.assert_called_once_with() - mock_resource.is_deleted.assert_called_once_with() - - self.assertEqual(1, mock_log.warning.call_count) - - @mock.patch("%s.LOG" % BASE) - def test__delete_single_resource_excpetion_in_is_deleted(self, mock_log): - mock_resource = mock.MagicMock(_max_attempts=3, _timeout=10, - _interval=0) - mock_resource.delete.return_value = True - mock_resource.is_deleted.side_effect = [Exception] * 4 - manager.SeekAndDestroy(None, None, None)._delete_single_resource( - mock_resource) - - mock_resource.delete.assert_called_once_with() - self.assertEqual(4, mock_resource.is_deleted.call_count) - - self.assertEqual(1, mock_log.warning.call_count) - self.assertEqual(4, mock_log.exception.call_count) - - def _manager(self, list_side_effect, **kw): - mock_mgr = mock.MagicMock() - mock_mgr().list.side_effect = list_side_effect - mock_mgr.reset_mock() - - for k, v in kw.items(): - setattr(mock_mgr, k, v) - - return mock_mgr - - @mock.patch("%s.SeekAndDestroy._get_cached_client" % BASE) - def test__publisher_admin(self, mock__get_cached_client): - mock_mgr = self._manager([Exception, Exception, [1, 2, 3]], - _perform_for_admin_only=False) - admin = mock.MagicMock() - publish = manager.SeekAndDestroy(mock_mgr, admin, None)._publisher - - queue = [] - publish(queue) - mock__get_cached_client.assert_called_once_with(admin) - mock_mgr.assert_called_once_with( - admin=mock__get_cached_client.return_value) - self.assertEqual(queue, [(admin, None, x) for x in range(1, 4)]) - - @mock.patch("%s.SeekAndDestroy._get_cached_client" % BASE) - def test__publisher_admin_only(self, mock__get_cached_client): - mock_mgr = self._manager([Exception, Exception, [1, 2, 3]], - _perform_for_admin_only=True) - admin = mock.MagicMock() - publish = manager.SeekAndDestroy( - mock_mgr, admin, ["u1", "u2"])._publisher - - queue = [] - publish(queue) - mock__get_cached_client.assert_called_once_with(admin) - mock_mgr.assert_called_once_with( - admin=mock__get_cached_client.return_value) - self.assertEqual(queue, [(admin, None, x) for x in range(1, 4)]) - - @mock.patch("%s.SeekAndDestroy._get_cached_client" % BASE) - def test__publisher_user_resource(self, mock__get_cached_client): - mock_mgr = self._manager([Exception, Exception, [1, 2, 3], - Exception, Exception, [4, 5]], - _perform_for_admin_only=False, - _tenant_resource=True) - - admin = mock.MagicMock() - users = [{"tenant_id": 1, "id": 1}, {"tenant_id": 2, "id": 2}] - publish = manager.SeekAndDestroy(mock_mgr, admin, users)._publisher - - queue = [] - publish(queue) - - mock_client = mock__get_cached_client.return_value - mock_mgr.assert_has_calls([ - mock.call(admin=mock_client, user=mock_client, - tenant_uuid=users[0]["tenant_id"]), - mock.call().list(), - mock.call().list(), - mock.call().list(), - mock.call(admin=mock_client, user=mock_client, - tenant_uuid=users[1]["tenant_id"]), - mock.call().list(), - mock.call().list() - ]) - mock__get_cached_client.assert_has_calls([ - mock.call(admin), - mock.call(users[0]), - mock.call(users[1]) - ]) - expected_queue = [(admin, users[0], x) for x in range(1, 4)] - expected_queue += 
[(admin, users[1], x) for x in range(4, 6)] - self.assertEqual(expected_queue, queue) - - @mock.patch("%s.LOG" % BASE) - @mock.patch("%s.SeekAndDestroy._get_cached_client" % BASE) - def test__gen_publisher_tenant_resource(self, mock__get_cached_client, - mock_log): - mock_mgr = self._manager([Exception, [1, 2, 3], - Exception, Exception, Exception, - ["this shouldn't be in results"]], - _perform_for_admin_only=False, - _tenant_resource=True) - users = [{"tenant_id": 1, "id": 1}, - {"tenant_id": 1, "id": 2}, - {"tenant_id": 2, "id": 3}] - - publish = manager.SeekAndDestroy( - mock_mgr, None, users)._publisher - - queue = [] - publish(queue) - - mock_client = mock__get_cached_client.return_value - mock_mgr.assert_has_calls([ - mock.call(admin=mock_client, user=mock_client, - tenant_uuid=users[0]["tenant_id"]), - mock.call().list(), - mock.call().list(), - mock.call(admin=mock_client, user=mock_client, - tenant_uuid=users[2]["tenant_id"]), - mock.call().list(), - mock.call().list(), - mock.call().list() - ]) - mock__get_cached_client.assert_has_calls([ - mock.call(None), - mock.call(users[0]), - mock.call(users[2]) - ]) - self.assertEqual(queue, [(None, users[0], x) for x in range(1, 4)]) - self.assertTrue(mock_log.warning.mock_called) - self.assertTrue(mock_log.exception.mock_called) - - @mock.patch("rally.common.utils.name_matches_object") - @mock.patch("%s.SeekAndDestroy._get_cached_client" % BASE) - @mock.patch("%s.SeekAndDestroy._delete_single_resource" % BASE) - def test__consumer(self, mock__delete_single_resource, - mock__get_cached_client, - mock_name_matches_object): - mock_mgr = mock.MagicMock(__name__="Test") - resource_classes = [mock.Mock()] - task_id = "task_id" - mock_name_matches_object.return_value = True - - consumer = manager.SeekAndDestroy( - mock_mgr, None, None, - resource_classes=resource_classes, - task_id=task_id)._consumer - - admin = mock.MagicMock() - user1 = {"id": "a", "tenant_id": "uuid1"} - cache = {} - - consumer(cache, (admin, user1, "res")) - mock_mgr.assert_called_once_with( - resource="res", - admin=mock__get_cached_client.return_value, - user=mock__get_cached_client.return_value, - tenant_uuid=user1["tenant_id"]) - mock__get_cached_client.assert_has_calls([ - mock.call(admin), - mock.call(user1) - ]) - mock__delete_single_resource.assert_called_once_with( - mock_mgr.return_value) - - mock_mgr.reset_mock() - mock__get_cached_client.reset_mock() - mock__delete_single_resource.reset_mock() - mock_name_matches_object.reset_mock() - - consumer(cache, (admin, None, "res2")) - mock_mgr.assert_called_once_with( - resource="res2", - admin=mock__get_cached_client.return_value, - user=mock__get_cached_client.return_value, - tenant_uuid=None) - - mock__get_cached_client.assert_has_calls([ - mock.call(admin), - mock.call(None) - ]) - mock__delete_single_resource.assert_called_once_with( - mock_mgr.return_value) - - @mock.patch("rally.common.utils.name_matches_object") - @mock.patch("%s.SeekAndDestroy._get_cached_client" % BASE) - @mock.patch("%s.SeekAndDestroy._delete_single_resource" % BASE) - def test__consumer_with_noname_resource(self, mock__delete_single_resource, - mock__get_cached_client, - mock_name_matches_object): - mock_mgr = mock.MagicMock(__name__="Test") - mock_mgr.return_value.name.return_value = True - task_id = "task_id" - mock_name_matches_object.return_value = False - - consumer = manager.SeekAndDestroy(mock_mgr, None, None, - task_id=task_id)._consumer - - consumer(None, (None, None, "res")) - self.assertFalse(mock__delete_single_resource.called) 
- - mock_mgr.return_value.name.return_value = base.NoName("foo") - consumer(None, (None, None, "res")) - mock__delete_single_resource.assert_called_once_with( - mock_mgr.return_value) - - @mock.patch("%s.broker.run" % BASE) - def test_exterminate(self, mock_broker_run): - manager_cls = mock.MagicMock(_threads=5) - cleaner = manager.SeekAndDestroy(manager_cls, None, None) - cleaner._publisher = mock.Mock() - cleaner._consumer = mock.Mock() - cleaner.exterminate() - - mock_broker_run.assert_called_once_with(cleaner._publisher, - cleaner._consumer, - consumers_count=5) - - -class ResourceManagerTestCase(test.TestCase): - - def _get_res_mock(self, **kw): - _mock = mock.MagicMock() - for k, v in kw.items(): - setattr(_mock, k, v) - return _mock - - def _list_res_names_helper(self, names, admin_required, mock_iter): - self.assertEqual(set(names), - manager.list_resource_names(admin_required)) - mock_iter.assert_called_once_with(base.ResourceManager) - mock_iter.reset_mock() - - @mock.patch("%s.discover.itersubclasses" % BASE) - def test_list_resource_names(self, mock_itersubclasses): - mock_itersubclasses.return_value = [ - self._get_res_mock(_service="fake", _resource="1", - _admin_required=True), - self._get_res_mock(_service="fake", _resource="2", - _admin_required=False), - self._get_res_mock(_service="other", _resource="2", - _admin_required=False) - ] - - self._list_res_names_helper( - ["fake", "other", "fake.1", "fake.2", "other.2"], - None, mock_itersubclasses) - self._list_res_names_helper( - ["fake", "fake.1"], - True, mock_itersubclasses) - self._list_res_names_helper( - ["fake", "other", "fake.2", "other.2"], - False, mock_itersubclasses) - - @mock.patch("%s.discover.itersubclasses" % BASE) - def test_find_resource_managers(self, mock_itersubclasses): - mock_itersubclasses.return_value = [ - self._get_res_mock(_service="fake", _resource="1", _order=1, - _admin_required=True), - self._get_res_mock(_service="fake", _resource="2", _order=3, - _admin_required=False), - self._get_res_mock(_service="other", _resource="2", _order=2, - _admin_required=False) - ] - - self.assertEqual(mock_itersubclasses.return_value[0:2], - manager.find_resource_managers(names=["fake"])) - - self.assertEqual(mock_itersubclasses.return_value[0:1], - manager.find_resource_managers(names=["fake.1"])) - - self.assertEqual( - [mock_itersubclasses.return_value[0], - mock_itersubclasses.return_value[2], - mock_itersubclasses.return_value[1]], - manager.find_resource_managers(names=["fake", "other"])) - - self.assertEqual(mock_itersubclasses.return_value[0:1], - manager.find_resource_managers(names=["fake"], - admin_required=True)) - self.assertEqual(mock_itersubclasses.return_value[1:2], - manager.find_resource_managers(names=["fake"], - admin_required=False)) - - @mock.patch("rally.common.plugin.discover.itersubclasses") - @mock.patch("%s.SeekAndDestroy" % BASE) - @mock.patch("%s.find_resource_managers" % BASE, - return_value=[mock.MagicMock(), mock.MagicMock()]) - def test_cleanup(self, mock_find_resource_managers, mock_seek_and_destroy, - mock_itersubclasses): - class A(utils.RandomNameGeneratorMixin): - pass - - class B(object): - pass - - mock_itersubclasses.return_value = [A, B] - - manager.cleanup(names=["a", "b"], admin_required=True, - admin="admin", users=["user"], - superclass=A, - task_id="task_id") - - mock_find_resource_managers.assert_called_once_with(["a", "b"], True) - - mock_seek_and_destroy.assert_has_calls([ - mock.call(mock_find_resource_managers.return_value[0], "admin", - ["user"], 
api_versions=None, - resource_classes=[A], task_id="task_id"), - mock.call().exterminate(), - mock.call(mock_find_resource_managers.return_value[1], "admin", - ["user"], api_versions=None, - resource_classes=[A], task_id="task_id"), - mock.call().exterminate() - ]) - - @mock.patch("rally.common.plugin.discover.itersubclasses") - @mock.patch("%s.SeekAndDestroy" % BASE) - @mock.patch("%s.find_resource_managers" % BASE, - return_value=[mock.MagicMock(), mock.MagicMock()]) - def test_cleanup_with_api_versions(self, - mock_find_resource_managers, - mock_seek_and_destroy, - mock_itersubclasses): - class A(utils.RandomNameGeneratorMixin): - pass - - class B(object): - pass - - mock_itersubclasses.return_value = [A, B] - - api_versions = {"cinder": {"version": "1", "service_type": "volume"}} - manager.cleanup(names=["a", "b"], admin_required=True, - admin="admin", users=["user"], - api_versions=api_versions, - superclass=utils.RandomNameGeneratorMixin, - task_id="task_id") - - mock_find_resource_managers.assert_called_once_with(["a", "b"], True) - - mock_seek_and_destroy.assert_has_calls([ - mock.call(mock_find_resource_managers.return_value[0], "admin", - ["user"], api_versions=api_versions, - resource_classes=[A], task_id="task_id"), - mock.call().exterminate(), - mock.call(mock_find_resource_managers.return_value[1], "admin", - ["user"], api_versions=api_versions, - resource_classes=[A], task_id="task_id"), - mock.call().exterminate() - ]) diff --git a/tests/unit/plugins/openstack/cleanup/test_resources.py b/tests/unit/plugins/openstack/cleanup/test_resources.py deleted file mode 100644 index 2e73653be9..0000000000 --- a/tests/unit/plugins/openstack/cleanup/test_resources.py +++ /dev/null @@ -1,1120 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy - -from boto import exception as boto_exception -import ddt -import mock -from neutronclient.common import exceptions as neutron_exceptions -from novaclient import exceptions as nova_exc -from watcherclient.common.apiclient import exceptions as watcher_exceptions - -from rally.plugins.openstack.cleanup import resources -from tests.unit import test - -BASE = "rally.plugins.openstack.cleanup.resources" -GLANCE_V2_PATH = ("rally.plugins.openstack.services.image.glance_v2." 
- "GlanceV2Service") - - -class SynchronizedDeletionTestCase(test.TestCase): - - def test_is_deleted(self): - self.assertTrue(resources.SynchronizedDeletion().is_deleted()) - - -class QuotaMixinTestCase(test.TestCase): - - @mock.patch("%s.identity.Identity" % BASE) - def test_list(self, mock_identity): - quota = resources.QuotaMixin() - quota.tenant_uuid = None - quota.user = mock.MagicMock() - self.assertEqual([], quota.list()) - self.assertFalse(mock_identity.called) - - quota.tenant_uuid = mock.MagicMock() - self.assertEqual([mock_identity.return_value.get_project.return_value], - quota.list()) - mock_identity.assert_called_once_with(quota.user) - - -class MagnumMixinTestCase(test.TestCase): - - def test_id(self): - magnum = resources.MagnumMixin() - magnum._service = "magnum" - magnum.raw_resource = mock.MagicMock() - self.assertEqual(magnum.raw_resource.uuid, magnum.id()) - - def test_list(self): - magnum = resources.MagnumMixin() - magnum._service = "magnum" - some_resources = [mock.MagicMock(), mock.MagicMock(), - mock.MagicMock(), mock.MagicMock()] - magnum._manager = mock.MagicMock() - magnum._manager.return_value.list.side_effect = ( - some_resources[:2], some_resources[2:4], []) - self.assertEqual(some_resources, magnum.list()) - self.assertEqual( - [mock.call(marker=None), mock.call(marker=some_resources[1].uuid), - mock.call(marker=some_resources[3].uuid)], - magnum._manager.return_value.list.call_args_list) - - -class NovaServerTestCase(test.TestCase): - - def test_list(self): - server = resources.NovaServer() - server._manager = mock.MagicMock() - - server.list() - - server._manager.return_value.list.assert_called_once_with(limit=-1) - - def test_delete(self): - server = resources.NovaServer() - server.raw_resource = mock.Mock() - server._manager = mock.Mock() - server.delete() - - server._manager.return_value.delete.assert_called_once_with( - server.raw_resource.id) - - def test_delete_locked(self): - server = resources.NovaServer() - server.raw_resource = mock.Mock() - setattr(server.raw_resource, "OS-EXT-STS:locked", True) - server._manager = mock.Mock() - server.delete() - - server.raw_resource.unlock.assert_called_once_with() - server._manager.return_value.delete.assert_called_once_with( - server.raw_resource.id) - - -class NovaFlavorsTestCase(test.TestCase): - - @mock.patch("%s.base.ResourceManager._manager" % BASE) - def test_is_deleted(self, mock_resource_manager__manager): - exc = nova_exc.NotFound(404) - mock_resource_manager__manager().get.side_effect = exc - flavor = resources.NovaFlavors() - flavor.raw_resource = mock.MagicMock() - self.assertTrue(flavor.is_deleted()) - - @mock.patch("%s.base.ResourceManager._manager" % BASE) - def test_is_deleted_fail(self, mock_resource_manager__manager): - mock_resource_manager__manager().get.side_effect = TypeError() - flavor = resources.NovaFlavors() - flavor.raw_resource = mock.MagicMock() - self.assertRaises(TypeError, flavor.is_deleted) - - -class NovaServerGroupsTestCase(test.TestCase): - - @mock.patch("%s.base.ResourceManager._manager" % BASE) - @mock.patch("rally.common.utils.name_matches_object") - def test_list(self, mock_name_matches_object, - mock_resource_manager__manager): - server_groups = [mock.MagicMock(name="rally_foo1"), - mock.MagicMock(name="rally_foo2"), - mock.MagicMock(name="foo3")] - mock_name_matches_object.side_effect = [False, True, True] - mock_resource_manager__manager().list.return_value = server_groups - self.assertEqual(server_groups, resources.NovaServerGroups().list()) - - -class 
EC2MixinTestCase(test.TestCase): - - def get_ec2_mixin(self): - ec2 = resources.EC2Mixin() - ec2._service = "ec2" - return ec2 - - def test__manager(self): - ec2 = self.get_ec2_mixin() - ec2.user = mock.MagicMock() - self.assertEqual(ec2.user.ec2.return_value, ec2._manager()) - - -class EC2ServerTestCase(test.TestCase): - - @mock.patch("%s.EC2Server._manager" % BASE) - def test_is_deleted(self, mock_ec2_server__manager): - raw_res1 = mock.MagicMock(state="terminated") - raw_res2 = mock.MagicMock(state="terminated") - resource = mock.MagicMock(id="test_id") - manager = resources.EC2Server(resource=resource) - - mock_ec2_server__manager().get_only_instances.return_value = [raw_res1] - self.assertTrue(manager.is_deleted()) - - raw_res1.state = "running" - self.assertFalse(manager.is_deleted()) - - mock_ec2_server__manager().get_only_instances.return_value = [ - raw_res1, raw_res2] - self.assertFalse(manager.is_deleted()) - - raw_res1.state = "terminated" - self.assertTrue(manager.is_deleted()) - - mock_ec2_server__manager().get_only_instances.return_value = [] - self.assertTrue(manager.is_deleted()) - - @mock.patch("%s.EC2Server._manager" % BASE) - def test_is_deleted_exceptions(self, mock_ec2_server__manager): - mock_ec2_server__manager.side_effect = [ - boto_exception.EC2ResponseError( - status="fake", reason="fake", - body={"Error": {"Code": "fake_code"}}), - boto_exception.EC2ResponseError( - status="fake", reason="fake", - body={"Error": {"Code": "InvalidInstanceID.NotFound"}}) - ] - manager = resources.EC2Server(resource=mock.MagicMock()) - self.assertFalse(manager.is_deleted()) - self.assertTrue(manager.is_deleted()) - - @mock.patch("%s.EC2Server._manager" % BASE) - def test_delete(self, mock_ec2_server__manager): - resource = mock.MagicMock(id="test_id") - manager = resources.EC2Server(resource=resource) - manager.delete() - mock_ec2_server__manager().terminate_instances.assert_called_once_with( - instance_ids=["test_id"]) - - @mock.patch("%s.EC2Server._manager" % BASE) - def test_list(self, mock_ec2_server__manager): - manager = resources.EC2Server() - mock_ec2_server__manager().get_only_instances.return_value = [ - "a", "b", "c"] - self.assertEqual(["a", "b", "c"], manager.list()) - - -class NeutronMixinTestCase(test.TestCase): - - def get_neutron_mixin(self): - neut = resources.NeutronMixin() - neut._service = "neutron" - return neut - - def test_manager(self): - neut = self.get_neutron_mixin() - neut.user = mock.MagicMock() - self.assertEqual(neut.user.neutron.return_value, neut._manager()) - - @mock.patch("%s.NeutronMixin._manager" % BASE) - def test_supports_extension(self, mock__manager): - mock__manager().list_extensions.return_value = { - "extensions": [{"alias": "foo"}, {"alias": "bar"}] - } - neut = self.get_neutron_mixin() - self.assertTrue(neut.supports_extension("foo")) - self.assertTrue(neut.supports_extension("bar")) - self.assertFalse(neut.supports_extension("foobar")) - - def test_id(self): - neut = self.get_neutron_mixin() - neut.raw_resource = {"id": "test"} - self.assertEqual("test", neut.id()) - - def test_name(self): - neutron = self.get_neutron_mixin() - neutron.raw_resource = {"id": "test_id", "name": "test_name"} - self.assertEqual("test_name", neutron.name()) - - def test_delete(self): - neut = self.get_neutron_mixin() - neut.user = mock.MagicMock() - neut._resource = "some_resource" - neut.raw_resource = {"id": "42"} - - neut.delete() - neut.user.neutron().delete_some_resource.assert_called_once_with("42") - - def test_list(self): - neut = 
self.get_neutron_mixin() - neut.user = mock.MagicMock() - neut._resource = "some_resource" - neut.tenant_uuid = "user_tenant" - - some_resources = [{"tenant_id": neut.tenant_uuid}, {"tenant_id": "a"}] - neut.user.neutron().list_some_resources.return_value = { - "some_resources": some_resources - } - - self.assertEqual([some_resources[0]], list(neut.list())) - - neut.user.neutron().list_some_resources.assert_called_once_with( - tenant_id=neut.tenant_uuid) - - -class NeutronLbaasV1MixinTestCase(test.TestCase): - - def get_neutron_lbaasv1_mixin(self, extensions=None): - if extensions is None: - extensions = [] - neut = resources.NeutronLbaasV1Mixin() - neut._service = "neutron" - neut._resource = "some_resource" - neut._manager = mock.Mock() - neut._manager().list_extensions.return_value = { - "extensions": [{"alias": ext} for ext in extensions] - } - return neut - - def test_list_lbaas_available(self): - neut = self.get_neutron_lbaasv1_mixin(extensions=["lbaas"]) - neut.tenant_uuid = "user_tenant" - - some_resources = [{"tenant_id": neut.tenant_uuid}, {"tenant_id": "a"}] - neut._manager().list_some_resources.return_value = { - "some_resources": some_resources - } - - self.assertEqual([some_resources[0]], list(neut.list())) - neut._manager().list_some_resources.assert_called_once_with( - tenant_id=neut.tenant_uuid) - - def test_list_lbaas_unavailable(self): - neut = self.get_neutron_lbaasv1_mixin() - - self.assertEqual([], list(neut.list())) - self.assertFalse(neut._manager().list_some_resources.called) - - -class NeutronLbaasV2MixinTestCase(test.TestCase): - - def get_neutron_lbaasv2_mixin(self, extensions=None): - if extensions is None: - extensions = [] - neut = resources.NeutronLbaasV2Mixin() - neut._service = "neutron" - neut._resource = "some_resource" - neut._manager = mock.Mock() - neut._manager().list_extensions.return_value = { - "extensions": [{"alias": ext} for ext in extensions] - } - return neut - - def test_list_lbaasv2_available(self): - neut = self.get_neutron_lbaasv2_mixin(extensions=["lbaasv2"]) - neut.tenant_uuid = "user_tenant" - - some_resources = [{"tenant_id": neut.tenant_uuid}, {"tenant_id": "a"}] - neut._manager().list_some_resources.return_value = { - "some_resources": some_resources - } - - self.assertEqual([some_resources[0]], list(neut.list())) - neut._manager().list_some_resources.assert_called_once_with( - tenant_id=neut.tenant_uuid) - - def test_list_lbaasv2_unavailable(self): - neut = self.get_neutron_lbaasv2_mixin() - - self.assertEqual([], list(neut.list())) - self.assertFalse(neut._manager().list_some_resources.called) - - -class NeutronV2LoadbalancerTestCase(test.TestCase): - - def get_neutron_lbaasv2_lb(self): - neutron_lb = resources.NeutronV2Loadbalancer() - neutron_lb.raw_resource = {"id": "1", "name": "s_rally"} - neutron_lb._manager = mock.Mock() - return neutron_lb - - def test_is_deleted_true(self): - from neutronclient.common import exceptions as n_exceptions - neutron_lb = self.get_neutron_lbaasv2_lb() - neutron_lb._manager().show_loadbalancer.side_effect = ( - n_exceptions.NotFound) - - self.assertTrue(neutron_lb.is_deleted()) - - neutron_lb._manager().show_loadbalancer.assert_called_once_with( - neutron_lb.id()) - - def test_is_deleted_false(self): - from neutronclient.common import exceptions as n_exceptions - neutron_lb = self.get_neutron_lbaasv2_lb() - neutron_lb._manager().show_loadbalancer.return_value = ( - neutron_lb.raw_resource) - - self.assertFalse(neutron_lb.is_deleted()) - 
neutron_lb._manager().show_loadbalancer.assert_called_once_with( - neutron_lb.id()) - - neutron_lb._manager().show_loadbalancer.reset_mock() - - neutron_lb._manager().show_loadbalancer.side_effect = ( - n_exceptions.Forbidden) - - self.assertFalse(neutron_lb.is_deleted()) - neutron_lb._manager().show_loadbalancer.assert_called_once_with( - neutron_lb.id()) - - -class NeutronBgpvpnTestCase(test.TestCase): - - def get_neutron_bgpvpn_mixin(self, extensions=None): - if extensions is None: - extensions = [] - admin = mock.Mock() - neut = resources.NeutronBgpvpn(admin=admin) - neut._manager = mock.Mock() - neut._manager().list_extensions.return_value = { - "extensions": [{"alias": ext} for ext in extensions] - } - return neut - - def test_list_user(self): - neut = self.get_neutron_bgpvpn_mixin(extensions=["bgpvpn"]) - user_bgpvpns = {"bgpvpns": [{"tenant_id": "foo", "id": "bgpvpn_id"}]} - neut._manager().list_bgpvpns.return_value = user_bgpvpns - - bgpvpns_list = neut.list() - self.assertEqual("bgpvpn", neut._resource) - neut._manager().list_bgpvpns.assert_called_once_with() - self.assertEqual(bgpvpns_list, user_bgpvpns["bgpvpns"]) - - def test_list_admin(self): - neut = self.get_neutron_bgpvpn_mixin(extensions=["bgpvpn"]) - admin_bgpvpns = {"bgpvpns": [{"tenant_id": "foo", "id": "bgpvpn_id"}]} - neut._manager().list_bgpvpns.return_value = admin_bgpvpns - - self.assertEqual("bgpvpn", neut._resource) - self.assertEqual(neut.list(), admin_bgpvpns["bgpvpns"]) - - -class NeutronFloatingIPTestCase(test.TestCase): - - def test_name(self): - fips = resources.NeutronFloatingIP({"name": "foo", - "description": "OoO"}) - self.assertEqual(fips.name(), "OoO") - - def test_list(self): - fips = {"floatingips": [{"tenant_id": "foo", "id": "foo"}]} - - user = mock.MagicMock() - user.neutron.return_value.list_floatingips.return_value = fips - - self.assertEqual(fips["floatingips"], list( - resources.NeutronFloatingIP(user=user, tenant_uuid="foo").list())) - user.neutron.return_value.list_floatingips.assert_called_once_with( - tenant_id="foo") - - -class NeutronPortTestCase(test.TestCase): - - def test_delete(self): - raw_res = {"device_owner": "abbabaab", "id": "some_id"} - user = mock.MagicMock() - - resources.NeutronPort(resource=raw_res, user=user).delete() - - user.neutron().delete_port.assert_called_once_with(raw_res["id"]) - - def test_delete_port_raise_exception(self): - raw_res = {"device_owner": "abbabaab", "id": "some_id"} - user = mock.MagicMock() - user.neutron().delete_port.side_effect = ( - neutron_exceptions.PortNotFoundClient) - - resources.NeutronPort(resource=raw_res, user=user).delete() - - user.neutron().delete_port.assert_called_once_with(raw_res["id"]) - - def test_delete_port_device_owner(self): - raw_res = { - "device_owner": "network:router_interface", - "id": "some_id", - "device_id": "dev_id" - } - user = mock.MagicMock() - - resources.NeutronPort(resource=raw_res, user=user).delete() - - user.neutron().remove_interface_router.assert_called_once_with( - raw_res["device_id"], {"port_id": raw_res["id"]}) - - def test_name(self): - raw_res = { - "id": "some_id", - "device_id": "dev_id", - } - - # automatically created or manually created port. 
No name field - self.assertEqual( - resources.NeutronPort(resource=raw_res, - user=mock.MagicMock()).name(), - "") - - raw_res["name"] = "foo" - self.assertEqual("foo", resources.NeutronPort( - resource=raw_res, user=mock.MagicMock()).name()) - - raw_res["parent_name"] = "bar" - self.assertEqual("bar", resources.NeutronPort( - resource=raw_res, user=mock.MagicMock()).name()) - - del raw_res["name"] - self.assertEqual("bar", resources.NeutronPort( - resource=raw_res, user=mock.MagicMock()).name()) - - def test_list(self): - - tenant_uuid = "uuuu-uuuu-iiii-dddd" - - ports = [ - # the case when 'name' is present, so 'device_owner' field is not - # required - {"tenant_id": tenant_uuid, "id": "id1", "name": "foo"}, - # 3 different cases when router_interface is an owner - {"tenant_id": tenant_uuid, "id": "id2", - "device_owner": "network:router_interface", - "device_id": "router-1"}, - {"tenant_id": tenant_uuid, "id": "id3", - "device_owner": "network:router_interface_distributed", - "device_id": "router-1"}, - {"tenant_id": tenant_uuid, "id": "id4", - "device_owner": "network:ha_router_replicated_interface", - "device_id": "router-2"}, - # the case when gateway router is an owner - {"tenant_id": tenant_uuid, "id": "id5", - "device_owner": "network:router_gateway", - "device_id": "router-3"}, - # the case when gateway router is an owner, but device_id is - # invalid - {"tenant_id": tenant_uuid, "id": "id6", - "device_owner": "network:router_gateway", - "device_id": "aaaa"}, - # the case when port was auto-created with floating-ip - {"tenant_id": tenant_uuid, "id": "id7", - "device_owner": "network:dhcp", - "device_id": "asdasdasd"}, - # the case when port is from another tenant - {"tenant_id": "wrong tenant", "id": "id8", "name": "foo"}, - # WTF port without any parent and name - {"tenant_id": tenant_uuid, "id": "id9", "device_owner": ""}, - ] - - routers = [ - {"id": "router-1", "name": "Router-1", "tenant_id": tenant_uuid}, - {"id": "router-2", "name": "Router-2", "tenant_id": tenant_uuid}, - {"id": "router-3", "name": "Router-3", "tenant_id": tenant_uuid}, - {"id": "router-4", "name": "Router-4", "tenant_id": tenant_uuid}, - {"id": "router-5", "name": "Router-5", "tenant_id": tenant_uuid}, - ] - - expected_ports = [] - for port in ports: - if port["tenant_id"] == tenant_uuid: - expected_ports.append(copy.deepcopy(port)) - if ("device_id" in port and - port["device_id"].startswith("router")): - expected_ports[-1]["parent_name"] = [ - r for r in routers - if r["id"] == port["device_id"]][0]["name"] - - class FakeNeutronClient(object): - - list_ports = mock.Mock() - list_routers = mock.Mock() - - neutron = FakeNeutronClient - neutron.list_ports.return_value = {"ports": ports} - neutron.list_routers.return_value = {"routers": routers} - - user = mock.Mock(neutron=neutron) - self.assertEqual(expected_ports, resources.NeutronPort( - user=user, tenant_uuid=tenant_uuid).list()) - neutron.list_ports.assert_called_once_with() - neutron.list_routers.assert_called_once_with() - - -@ddt.ddt -class NeutronSecurityGroupTestCase(test.TestCase): - - @ddt.data( - {"admin": mock.Mock(), "admin_required": True}, - {"admin": None, "admin_required": False}) - @ddt.unpack - def test_list(self, admin, admin_required): - sg_list = [{"tenant_id": "user_tenant", "name": "default"}, - {"tenant_id": "user_tenant", "name": "foo_sg"}] - - neut = resources.NeutronSecurityGroup() - neut.user = mock.MagicMock() - neut._resource = "security_group" - neut.tenant_uuid = "user_tenant" - - 
neut.user.neutron().list_security_groups.return_value = { - "security_groups": sg_list - } - - expected_result = [sg_list[1]] - self.assertEqual(expected_result, list(neut.list())) - - neut.user.neutron().list_security_groups.assert_called_once_with( - tenant_id=neut.tenant_uuid) - - -class NeutronQuotaTestCase(test.TestCase): - - def test_delete(self): - admin = mock.MagicMock() - resources.NeutronQuota(admin=admin, tenant_uuid="fake").delete() - admin.neutron.return_value.delete_quota.assert_called_once_with("fake") - - -@ddt.ddt -class GlanceImageTestCase(test.TestCase): - - @mock.patch("rally.plugins.openstack.services.image.image.Image") - def test__client_admin(self, mock_image): - admin = mock.Mock() - glance = resources.GlanceImage(admin=admin) - client = glance._client() - - mock_image.assert_called_once_with(admin) - self.assertEqual(client, mock_image.return_value) - - @mock.patch("rally.plugins.openstack.services.image.image.Image") - def test__client_user(self, mock_image): - user = mock.Mock() - glance = resources.GlanceImage(user=user) - wrapper = glance._client() - - mock_image.assert_called_once_with(user) - self.assertEqual(wrapper, mock_image.return_value) - - @mock.patch("rally.plugins.openstack.services.image.image.Image") - def test__client_admin_preferred(self, mock_image): - admin = mock.Mock() - user = mock.Mock() - glance = resources.GlanceImage(admin=admin, user=user) - client = glance._client() - - mock_image.assert_called_once_with(admin) - self.assertEqual(client, mock_image.return_value) - - def test_list(self): - glance = resources.GlanceImage() - glance._client = mock.Mock() - list_images = glance._client.return_value.list_images - list_images.side_effect = ( - ["active-image1", "active-image2"], - ["deactivated-image1"]) - glance.tenant_uuid = mock.Mock() - - self.assertEqual( - glance.list(), - ["active-image1", "active-image2", "deactivated-image1"]) - list_images.assert_has_calls([ - mock.call(owner=glance.tenant_uuid), - mock.call(status="deactivated", owner=glance.tenant_uuid)]) - - def test_delete(self): - glance = resources.GlanceImage() - glance._client = mock.Mock() - glance._wrapper = mock.Mock() - glance.raw_resource = mock.Mock() - - client = glance._client.return_value - - deleted_image = mock.Mock(status="DELETED") - client.get_image.side_effect = [glance.raw_resource, deleted_image] - - glance.delete() - client.delete_image.assert_called_once_with(glance.raw_resource.id) - self.assertFalse(client.reactivate_image.called) - - @mock.patch("%s.reactivate_image" % GLANCE_V2_PATH) - def test_delete_deactivated_image(self, mock_reactivate_image): - glance = resources.GlanceImage() - glance._client = mock.Mock() - glance._wrapper = mock.Mock() - glance.raw_resource = mock.Mock(status="deactivated") - - client = glance._client.return_value - - deleted_image = mock.Mock(status="DELETED") - client.get_image.side_effect = [glance.raw_resource, deleted_image] - - glance.delete() - - mock_reactivate_image.assert_called_once_with(glance.raw_resource.id) - client.delete_image.assert_called_once_with(glance.raw_resource.id) - - -class CeilometerTestCase(test.TestCase): - - def test_id(self): - ceil = resources.CeilometerAlarms() - ceil.raw_resource = mock.MagicMock() - self.assertEqual(ceil.raw_resource.alarm_id, ceil.id()) - - @mock.patch("%s.CeilometerAlarms._manager" % BASE) - def test_list(self, mock_ceilometer_alarms__manager): - - ceil = resources.CeilometerAlarms() - ceil.tenant_uuid = mock.MagicMock() - 
mock_ceilometer_alarms__manager().list.return_value = ["a", "b", "c"] - mock_ceilometer_alarms__manager.reset_mock() - - self.assertEqual(["a", "b", "c"], ceil.list()) - mock_ceilometer_alarms__manager().list.assert_called_once_with( - q=[{"field": "project_id", "op": "eq", "value": ceil.tenant_uuid}]) - - -class ZaqarQueuesTestCase(test.TestCase): - - def test_list(self): - user = mock.Mock() - zaqar = resources.ZaqarQueues(user=user) - zaqar.list() - user.zaqar().queues.assert_called_once_with() - - -class KeystoneMixinTestCase(test.TestCase): - - def test_is_deleted(self): - self.assertTrue(resources.KeystoneMixin().is_deleted()) - - def get_keystone_mixin(self): - kmixin = resources.KeystoneMixin() - kmixin._service = "keystone" - return kmixin - - @mock.patch("%s.identity" % BASE) - def test_manager(self, mock_identity): - keystone_mixin = self.get_keystone_mixin() - keystone_mixin.admin = mock.MagicMock() - self.assertEqual(mock_identity.Identity.return_value, - keystone_mixin._manager()) - mock_identity.Identity.assert_called_once_with( - keystone_mixin.admin) - - @mock.patch("%s.identity" % BASE) - def test_delete(self, mock_identity): - keystone_mixin = self.get_keystone_mixin() - keystone_mixin._resource = "some_resource" - keystone_mixin.id = lambda: "id_a" - keystone_mixin.admin = mock.MagicMock() - - keystone_mixin.delete() - mock_identity.Identity.assert_called_once_with(keystone_mixin.admin) - identity_service = mock_identity.Identity.return_value - identity_service.delete_some_resource.assert_called_once_with("id_a") - - @mock.patch("%s.identity" % BASE) - def test_list(self, mock_identity): - keystone_mixin = self.get_keystone_mixin() - keystone_mixin._resource = "some_resource2" - keystone_mixin.admin = mock.MagicMock() - identity = mock_identity.Identity - - self.assertSequenceEqual( - identity.return_value.list_some_resource2s.return_value, - keystone_mixin.list()) - identity.assert_called_once_with(keystone_mixin.admin) - identity.return_value.list_some_resource2s.assert_called_once_with() - - -class KeystoneEc2TestCase(test.TestCase): - def test_user_id_property(self): - user_client = mock.Mock() - admin_client = mock.Mock() - - manager = resources.KeystoneEc2(user=user_client, admin=admin_client) - - self.assertEqual(user_client.keystone.auth_ref.user_id, - manager.user_id) - - def test_list(self): - user_client = mock.Mock() - admin_client = mock.Mock() - - with mock.patch("%s.identity.Identity" % BASE, autospec=True) as p: - identity = p.return_value - manager = resources.KeystoneEc2(user=user_client, - admin=admin_client) - self.assertEqual(identity.list_ec2credentials.return_value, - manager.list()) - p.assert_called_once_with(user_client) - identity.list_ec2credentials.assert_called_once_with( - manager.user_id) - - def test_delete(self): - user_client = mock.Mock() - admin_client = mock.Mock() - raw_resource = mock.Mock() - - with mock.patch("%s.identity.Identity" % BASE, autospec=True) as p: - manager = resources.KeystoneEc2(user=user_client, - admin=admin_client, - resource=raw_resource) - manager.delete() - - p.assert_called_once_with(user_client) - p.return_value.delete_ec2credential.assert_called_once_with( - manager.user_id, access=raw_resource.access) - - -class SwiftMixinTestCase(test.TestCase): - - def get_swift_mixin(self): - swift_mixin = resources.SwiftMixin() - swift_mixin._service = "swift" - return swift_mixin - - def test_manager(self): - swift_mixin = self.get_swift_mixin() - swift_mixin.user = mock.MagicMock() - 
self.assertEqual(swift_mixin.user.swift.return_value, - swift_mixin._manager()) - - def test_id(self): - swift_mixin = self.get_swift_mixin() - swift_mixin.raw_resource = mock.MagicMock() - self.assertEqual(swift_mixin.raw_resource, swift_mixin.id()) - - def test_name(self): - swift = self.get_swift_mixin() - swift.raw_resource = ["name1", "name2"] - self.assertEqual("name2", swift.name()) - - def test_delete(self): - swift_mixin = self.get_swift_mixin() - swift_mixin.user = mock.MagicMock() - swift_mixin._resource = "some_resource" - swift_mixin.raw_resource = mock.MagicMock() - swift_mixin.delete() - swift_mixin.user.swift().delete_some_resource.assert_called_once_with( - *swift_mixin.raw_resource) - - -class SwiftObjectTestCase(test.TestCase): - - @mock.patch("%s.SwiftMixin._manager" % BASE) - def test_list(self, mock_swift_mixin__manager): - containers = [mock.MagicMock(), mock.MagicMock()] - objects = [mock.MagicMock(), mock.MagicMock(), mock.MagicMock()] - mock_swift_mixin__manager().get_account.return_value = ( - "header", containers) - mock_swift_mixin__manager().get_container.return_value = ( - "header", objects) - self.assertEqual(len(containers), - len(resources.SwiftContainer().list())) - self.assertEqual(len(containers) * len(objects), - len(resources.SwiftObject().list())) - - -class SwiftContainerTestCase(test.TestCase): - - @mock.patch("%s.SwiftMixin._manager" % BASE) - def test_list(self, mock_swift_mixin__manager): - containers = [mock.MagicMock(), mock.MagicMock(), mock.MagicMock()] - mock_swift_mixin__manager().get_account.return_value = ( - "header", containers) - self.assertEqual(len(containers), - len(resources.SwiftContainer().list())) - - -class ManilaShareTestCase(test.TestCase): - - def test_list(self): - share_resource = resources.ManilaShare() - share_resource._manager = mock.MagicMock() - - share_resource.list() - - self.assertEqual("shares", share_resource._resource) - share_resource._manager.return_value.list.assert_called_once_with() - - def test_delete(self): - share_resource = resources.ManilaShare() - share_resource._manager = mock.MagicMock() - share_resource.id = lambda: "fake_id" - - share_resource.delete() - - self.assertEqual("shares", share_resource._resource) - share_resource._manager.return_value.delete.assert_called_once_with( - "fake_id") - - -class ManilaShareNetworkTestCase(test.TestCase): - - def test_list(self): - sn_resource = resources.ManilaShareNetwork() - sn_resource._manager = mock.MagicMock() - - sn_resource.list() - - self.assertEqual("share_networks", sn_resource._resource) - sn_resource._manager.return_value.list.assert_called_once_with() - - def test_delete(self): - sn_resource = resources.ManilaShareNetwork() - sn_resource._manager = mock.MagicMock() - sn_resource.id = lambda: "fake_id" - - sn_resource.delete() - - self.assertEqual("share_networks", sn_resource._resource) - sn_resource._manager.return_value.delete.assert_called_once_with( - "fake_id") - - -class ManilaSecurityServiceTestCase(test.TestCase): - - def test_list(self): - ss_resource = resources.ManilaSecurityService() - ss_resource._manager = mock.MagicMock() - - ss_resource.list() - - self.assertEqual("security_services", ss_resource._resource) - ss_resource._manager.return_value.list.assert_called_once_with() - - def test_delete(self): - ss_resource = resources.ManilaSecurityService() - ss_resource._manager = mock.MagicMock() - ss_resource.id = lambda: "fake_id" - - ss_resource.delete() - - self.assertEqual("security_services", ss_resource._resource) - 
ss_resource._manager.return_value.delete.assert_called_once_with( - "fake_id") - - -class MistralWorkbookTestCase(test.TestCase): - - def test_delete(self): - clients = mock.MagicMock() - resource = mock.Mock() - resource.name = "TEST_NAME" - - mistral = resources.MistralWorkbooks( - user=clients, - resource=resource) - - mistral.delete() - - clients.mistral().workbooks.delete.assert_called_once_with( - "TEST_NAME") - - -class MistralExecutionsTestCase(test.TestCase): - - def test_name(self): - execution = mock.MagicMock(workflow_name="bar") - execution.name = "foo" - self.assertEqual("bar", resources.MistralExecutions(execution).name()) - - -class SenlinMixinTestCase(test.TestCase): - - def test_id(self): - senlin = resources.SenlinMixin() - senlin.raw_resource = {"id": "TEST_ID"} - self.assertEqual("TEST_ID", senlin.id()) - - def test__manager(self): - senlin = resources.SenlinMixin() - senlin._service = "senlin" - senlin.user = mock.MagicMock() - self.assertEqual(senlin.user.senlin.return_value, senlin._manager()) - - def test_list(self): - senlin = resources.SenlinMixin() - senlin._service = "senlin" - senlin.user = mock.MagicMock() - senlin._resource = "some_resources" - - some_resources = [{"name": "resource1"}, {"name": "resource2"}] - senlin.user.senlin().some_resources.return_value = some_resources - - self.assertEqual(some_resources, senlin.list()) - senlin.user.senlin().some_resources.assert_called_once_with() - - def test_delete(self): - senlin = resources.SenlinMixin() - senlin._service = "senlin" - senlin.user = mock.MagicMock() - senlin._resource = "some_resources" - senlin.raw_resource = {"id": "TEST_ID"} - senlin.user.senlin().delete_some_resource.return_value = None - - senlin.delete() - senlin.user.senlin().delete_some_resource.assert_called_once_with( - "TEST_ID") - - -class WatcherTemplateTestCase(test.TestCase): - - def test_id(self): - watcher = resources.WatcherTemplate() - watcher.raw_resource = mock.MagicMock(uuid=100) - self.assertEqual(100, watcher.id()) - - @mock.patch("%s.WatcherTemplate._manager" % BASE) - def test_is_deleted(self, mock__manager): - mock__manager.return_value.get.return_value = None - watcher = resources.WatcherTemplate() - watcher.id = mock.Mock() - self.assertFalse(watcher.is_deleted()) - mock__manager.side_effect = [watcher_exceptions.NotFound()] - self.assertTrue(watcher.is_deleted()) - - def test_list(self): - watcher = resources.WatcherTemplate() - watcher._manager = mock.MagicMock() - - watcher.list() - - self.assertEqual("audit_template", watcher._resource) - watcher._manager().list.assert_called_once_with(limit=0) - - -class WatcherAuditTestCase(test.TestCase): - - def test_id(self): - watcher = resources.WatcherAudit() - watcher.raw_resource = mock.MagicMock(uuid=100) - self.assertEqual(100, watcher.id()) - - def test_name(self): - watcher = resources.WatcherAudit() - watcher.raw_resource = mock.MagicMock(uuid="name") - self.assertEqual("name", watcher.name()) - - @mock.patch("%s.WatcherAudit._manager" % BASE) - def test_is_deleted(self, mock__manager): - mock__manager.return_value.get.return_value = None - watcher = resources.WatcherAudit() - watcher.id = mock.Mock() - self.assertFalse(watcher.is_deleted()) - mock__manager.side_effect = [watcher_exceptions.NotFound()] - self.assertTrue(watcher.is_deleted()) - - def test_list(self): - watcher = resources.WatcherAudit() - watcher._manager = mock.MagicMock() - - watcher.list() - - self.assertEqual("audit", watcher._resource) - watcher._manager().list.assert_called_once_with(limit=0) - 
- -class WatcherActionPlanTestCase(test.TestCase): - - def test_id(self): - watcher = resources.WatcherActionPlan() - watcher.raw_resource = mock.MagicMock(uuid=100) - self.assertEqual(100, watcher.id()) - - def test_name(self): - watcher = resources.WatcherActionPlan() - self.assertIsInstance(watcher.name(), resources.base.NoName) - - @mock.patch("%s.WatcherActionPlan._manager" % BASE) - def test_is_deleted(self, mock__manager): - mock__manager.return_value.get.return_value = None - watcher = resources.WatcherActionPlan() - watcher.id = mock.Mock() - self.assertFalse(watcher.is_deleted()) - mock__manager.side_effect = [watcher_exceptions.NotFound()] - self.assertTrue(watcher.is_deleted()) - - def test_list(self): - watcher = resources.WatcherActionPlan() - watcher._manager = mock.MagicMock() - - watcher.list() - - self.assertEqual("action_plan", watcher._resource) - watcher._manager().list.assert_called_once_with(limit=0) - - -class CinderImageVolumeCacheTestCase(test.TestCase): - - class Resource(object): - def __init__(self, id=None, name=None): - self.id = id - self.name = name - - @mock.patch("rally.plugins.openstack.services.image.image.Image") - def test_list(self, mock_image): - admin = mock.Mock() - - glance = mock_image.return_value - cinder = admin.cinder.return_value - - image_1 = self.Resource("foo", name="foo-name") - image_2 = self.Resource("bar", name="bar-name") - glance.list_images.return_value = [image_1, image_2] - volume_1 = self.Resource(name="v1") - volume_2 = self.Resource(name="image-foo") - volume_3 = self.Resource(name="foo") - volume_4 = self.Resource(name="bar") - cinder.volumes.list.return_value = [volume_1, volume_2, volume_3, - volume_4] - - manager = resources.CinderImageVolumeCache(admin=admin) - - self.assertEqual([{"volume": volume_2, "image": image_1}], - manager.list()) - - mock_image.assert_called_once_with(admin) - glance.list_images.assert_called_once_with() - cinder.volumes.list.assert_called_once_with( - search_opts={"all_tenants": 1}) - - def test_id_and_name(self): - - res = resources.CinderImageVolumeCache( - {"volume": self.Resource("volume-id", "volume-name"), - "image": self.Resource("image-id", "image-name")}) - - self.assertEqual("volume-id", res.id()) - self.assertEqual("image-name", res.name()) - - -class GnocchiArchivePolicyRuleTestCase(test.TestCase): - - def get_gnocchi(self): - gnocchi = resources.GnocchiArchivePolicyRule() - gnocchi._service = "gnocchi" - return gnocchi - - def test_id(self): - gnocchi = self.get_gnocchi() - gnocchi.raw_resource = {"name": "test_name"} - self.assertEqual("test_name", gnocchi.id()) - - def test_name(self): - gnocchi = self.get_gnocchi() - gnocchi.raw_resource = {"name": "test_name"} - self.assertEqual("test_name", gnocchi.name()) diff --git a/tests/unit/plugins/openstack/context/__init__.py b/tests/unit/plugins/openstack/context/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/context/ceilometer/__init__.py b/tests/unit/plugins/openstack/context/ceilometer/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/context/ceilometer/test_samples.py b/tests/unit/plugins/openstack/context/ceilometer/test_samples.py deleted file mode 100644 index 23ff6ae86a..0000000000 --- a/tests/unit/plugins/openstack/context/ceilometer/test_samples.py +++ /dev/null @@ -1,179 +0,0 @@ -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy - -import mock -import six - -from rally import exceptions -from rally.plugins.openstack.context.ceilometer import samples -from rally.plugins.openstack.scenarios.ceilometer import utils as ceilo_utils -from tests.unit import test - -CTX = "rally.plugins.openstack.context.ceilometer" - - -class CeilometerSampleGeneratorTestCase(test.TestCase): - - def _gen_tenants(self, count): - tenants = {} - for id_ in range(count): - tenants[str(id_)] = {"name": str(id_)} - return tenants - - def _gen_context(self, tenants_count, users_per_tenant, - resources_per_tenant, samples_per_resource): - tenants = self._gen_tenants(tenants_count) - users = [] - for id_ in tenants.keys(): - for i in range(users_per_tenant): - users.append({"id": i, "tenant_id": id_, - "credential": mock.MagicMock()}) - context = test.get_test_context() - context.update({ - "config": { - "users": { - "tenants": tenants_count, - "users_per_tenant": users_per_tenant, - "concurrent": 10, - }, - "ceilometer": { - "counter_name": "fake-counter-name", - "counter_type": "fake-counter-type", - "counter_unit": "fake-counter-unit", - "counter_volume": 100, - "resources_per_tenant": resources_per_tenant, - "samples_per_resource": samples_per_resource, - "timestamp_interval": 60, - "metadata_list": ( - {"status": "active", "name": "fake_resource", - "deleted": "False", - "created_at": "2015-09-04T12:34:19.000000"}, - {"status": "not_active", "name": "fake_resource_1", - "deleted": "False", - "created_at": "2015-09-10T06:55:12.000000"}, - ) - } - }, - "admin": { - "credential": mock.MagicMock() - }, - "users": users, - "tenants": tenants, - "user_choice_method": "random", - }) - return tenants, context - - def test_init(self): - context = {} - context["task"] = mock.MagicMock() - context["config"] = { - "ceilometer": { - "counter_name": "cpu_util", - "counter_type": "gauge", - "counter_unit": "instance", - "counter_volume": 1.0, - "resources_per_tenant": 5, - "samples_per_resource": 5, - "timestamp_interval": 60, - "metadata_list": ( - {"status": "active", "name": "fake_resource", - "deleted": "False", - "created_at": "2015-09-04T12:34:19.000000"}, - {"status": "not_active", "name": "fake_resource_1", - "deleted": "False", - "created_at": "2015-09-10T06:55:12.000000"}, - ) - } - } - - inst = samples.CeilometerSampleGenerator(context) - self.assertEqual(inst.config, context["config"]["ceilometer"]) - - def test__store_batch_samples(self): - tenants_count = 2 - users_per_tenant = 2 - resources_per_tenant = 2 - samples_per_resource = 2 - - tenants, real_context = self._gen_context( - tenants_count, users_per_tenant, - resources_per_tenant, samples_per_resource) - ceilometer_ctx = samples.CeilometerSampleGenerator(real_context) - scenario = ceilo_utils.CeilometerScenario(real_context) - self.assertRaises( - exceptions.ContextSetupFailure, - ceilometer_ctx._store_batch_samples, - scenario, ["foo", "bar"], 1) - - @mock.patch("%s.samples.ceilo_utils.CeilometerScenario._get_resource" - % CTX) 
- @mock.patch("%s.samples.ceilo_utils.CeilometerScenario._create_samples" - % CTX) - @mock.patch( - "rally.plugins.openstack.scenarios.ceilometer.utils.uuid") - def test_setup(self, mock_uuid, mock_create_samples, mock_get_resource): - mock_uuid.uuid4.return_value = "fake_resource-id" - tenants_count = 2 - users_per_tenant = 2 - resources_per_tenant = 2 - samples_per_resource = 2 - - tenants, real_context = self._gen_context( - tenants_count, users_per_tenant, - resources_per_tenant, samples_per_resource) - scenario = ceilo_utils.CeilometerScenario(real_context) - sample = { - "counter_name": "fake-counter-name", - "counter_type": "fake-counter-type", - "counter_unit": "fake-counter-unit", - "counter_volume": 100, - "metadata_list": [ - {"status": "active", "name": "fake_resource", - "deleted": "False", - "created_at": "2015-09-04T12:34:19.000000"}, - {"status": "not_active", "name": "fake_resource_1", - "deleted": "False", - "created_at": "2015-09-10T06:55:12.000000"} - ] - } - kwargs = copy.deepcopy(sample) - samples_to_create = list( - scenario._make_samples(count=samples_per_resource, interval=60, - **kwargs) - )[0] - new_context = copy.deepcopy(real_context) - for id_ in tenants.keys(): - new_context["tenants"][id_].setdefault("samples", []) - new_context["tenants"][id_].setdefault("resources", []) - for i in six.moves.xrange(resources_per_tenant): - for sample in samples_to_create: - new_context["tenants"][id_]["samples"].append(sample) - new_context["tenants"][id_]["resources"].append( - sample["resource_id"]) - - mock_create_samples.return_value = [] - for i, sample in enumerate(samples_to_create): - sample_object = mock.MagicMock(resource_id="fake_resource-id") - sample_object.to_dict.return_value = sample - mock_create_samples.return_value.append(sample_object) - ceilometer_ctx = samples.CeilometerSampleGenerator(real_context) - ceilometer_ctx.setup() - self.assertEqual(new_context, ceilometer_ctx.context) - - def test_cleanup(self): - tenants, context = self._gen_context(2, 5, 3, 3) - ceilometer_ctx = samples.CeilometerSampleGenerator(context) - ceilometer_ctx.cleanup() diff --git a/tests/unit/plugins/openstack/context/cinder/__init__.py b/tests/unit/plugins/openstack/context/cinder/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/context/cinder/test_volume_types.py b/tests/unit/plugins/openstack/context/cinder/test_volume_types.py deleted file mode 100644 index 7ec1ba2949..0000000000 --- a/tests/unit/plugins/openstack/context/cinder/test_volume_types.py +++ /dev/null @@ -1,65 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from rally.plugins.openstack.context.cinder import volume_types -from tests.unit import test - -CTX = "rally.plugins.openstack.context.cinder.volume_types" -SERVICE = "rally.plugins.openstack.services.storage" - - -class VolumeTypeGeneratorTestCase(test.ContextTestCase): - def setUp(self): - super(VolumeTypeGeneratorTestCase, self).setUp() - self.context.update({"admin": {"credential": "admin_creds"}}) - - @mock.patch("%s.block.BlockStorage" % SERVICE) - def test_setup(self, mock_block_storage): - self.context.update({"config": {"volume_types": ["foo", "bar"]}}) - mock_service = mock_block_storage.return_value - mock_service.create_volume_type.side_effect = ( - mock.Mock(id="foo-id"), mock.Mock(id="bar-id")) - - vtype_ctx = volume_types.VolumeTypeGenerator(self.context) - vtype_ctx.setup() - - mock_service.create_volume_type.assert_has_calls( - [mock.call("foo"), mock.call("bar")]) - self.assertEqual(self.context["volume_types"], - [{"id": "foo-id", "name": "foo"}, - {"id": "bar-id", "name": "bar"}]) - - @mock.patch("%s.utils.make_name_matcher" % CTX) - @mock.patch("%s.resource_manager.cleanup" % CTX) - def test_cleanup(self, mock_cleanup, mock_make_name_matcher): - self.context.update({ - "config": {"volume_types": ["foo", "bar"], - "api_versions": { - "cinder": {"version": 2, - "service_type": "volumev2"}}}}) - - vtype_ctx = volume_types.VolumeTypeGenerator(self.context) - - vtype_ctx.cleanup() - - mock_cleanup.assert_called_once_with( - names=["cinder.volume_types"], - admin=self.context["admin"], - api_versions=self.context["config"]["api_versions"], - superclass=mock_make_name_matcher.return_value, - task_id=vtype_ctx.get_owner_id()) - - mock_make_name_matcher.assert_called_once_with("foo", "bar") diff --git a/tests/unit/plugins/openstack/context/cinder/test_volumes.py b/tests/unit/plugins/openstack/context/cinder/test_volumes.py deleted file mode 100644 index 4bcd328486..0000000000 --- a/tests/unit/plugins/openstack/context/cinder/test_volumes.py +++ /dev/null @@ -1,200 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import copy - -import ddt -import mock - -from rally.plugins.openstack.context.cinder import volumes -from rally.task import context -from tests.unit import test - -CTX = "rally.plugins.openstack.context" -SERVICE = "rally.plugins.openstack.services.storage" - - -@ddt.ddt -class VolumeGeneratorTestCase(test.ScenarioTestCase): - - def _gen_tenants(self, count): - tenants = {} - for id_ in range(count): - tenants[str(id_)] = {"name": str(id_)} - return tenants - - def test_init(self): - self.context.update({ - "config": { - "volumes": { - "size": 1, - "volumes_per_tenant": 5, - } - } - }) - - inst = volumes.VolumeGenerator(self.context) - self.assertEqual(inst.config, self.context["config"]["volumes"]) - - @ddt.data({"config": {"size": 1, "volumes_per_tenant": 5}}, - {"config": {"size": 1, "type": None, "volumes_per_tenant": 5}}, - {"config": {"size": 1, "type": -1, "volumes_per_tenant": 5}, - "valid": False}) - @ddt.unpack - @mock.patch("%s.block.BlockStorage" % SERVICE) - def test_setup(self, mock_block_storage, config, valid=True): - results = context.Context.validate("volumes", None, None, config) - if valid: - self.assertEqual([], results) - else: - self.assertEqual(1, len(results)) - - from rally.plugins.openstack.services.storage import block - created_volume = block.Volume(id="uuid", size=config["size"], - name="vol", status="avaiable") - - mock_service = mock_block_storage.return_value - mock_service.create_volume.return_value = created_volume - users_per_tenant = 5 - volumes_per_tenant = config.get("volumes_per_tenant", 5) - tenants = self._gen_tenants(2) - users = [] - for id_ in tenants: - for i in range(users_per_tenant): - users.append({"id": i, "tenant_id": id_, - "credential": mock.MagicMock()}) - - self.context.update({ - "config": { - "users": { - "tenants": 2, - "users_per_tenant": 5, - "concurrent": 10, - }, - "volumes": config - }, - "admin": { - "credential": mock.MagicMock() - }, - "users": users, - "tenants": tenants - }) - - new_context = copy.deepcopy(self.context) - for id_ in tenants.keys(): - new_context["tenants"][id_].setdefault("volumes", []) - for i in range(volumes_per_tenant): - new_context["tenants"][id_]["volumes"].append( - mock_service.create_volume.return_value._as_dict()) - - volumes_ctx = volumes.VolumeGenerator(self.context) - volumes_ctx.setup() - self.assertEqual(new_context, self.context) - - @mock.patch("%s.cinder.volumes.resource_manager.cleanup" % CTX) - def test_cleanup(self, mock_cleanup): - tenants_count = 2 - users_per_tenant = 5 - volumes_per_tenant = 5 - - tenants = self._gen_tenants(tenants_count) - users = [] - for id_ in tenants.keys(): - for i in range(users_per_tenant): - users.append({"id": i, "tenant_id": id_, - "credential": "credential"}) - tenants[id_].setdefault("volumes", []) - for j in range(volumes_per_tenant): - tenants[id_]["volumes"].append({"id": "uuid"}) - - self.context.update({ - "config": { - "users": { - "tenants": 2, - "users_per_tenant": 5, - "concurrent": 10, - }, - "volumes": { - "size": 1, - "volumes_per_tenant": 5, - } - }, - "admin": { - "credential": mock.MagicMock() - }, - "users": users, - "tenants": tenants - }) - - volumes_ctx = volumes.VolumeGenerator(self.context) - volumes_ctx.cleanup() - - mock_cleanup.assert_called_once_with( - names=["cinder.volumes"], users=self.context["users"], - api_versions=None, superclass=volumes_ctx.__class__, - task_id=self.context["owner_id"]) - - @mock.patch("%s.cinder.volumes.resource_manager.cleanup" % CTX) - def test_cleanup_api_versions(self, mock_cleanup): - - 
tenants_count = 2 - users_per_tenant = 5 - volumes_per_tenant = 5 - - tenants = self._gen_tenants(tenants_count) - users = [] - for id_ in tenants.keys(): - for i in range(users_per_tenant): - users.append({"id": i, "tenant_id": id_, - "credential": "credential"}) - tenants[id_].setdefault("volumes", []) - for j in range(volumes_per_tenant): - tenants[id_]["volumes"].append({"id": "uuid"}) - - api_version = { - "cinder": { - "version": 1, - "service_type": "volume" - } - } - self.context.update({ - "config": { - "users": { - "tenants": 2, - "users_per_tenant": 5, - "concurrent": 10, - }, - "volumes": { - "size": 1, - "type": "volume_type", - "volumes_per_tenant": 5, - }, - "api_versions": api_version - }, - "admin": { - "credential": mock.MagicMock() - }, - "users": users, - "tenants": tenants - }) - - volumes_ctx = volumes.VolumeGenerator(self.context) - volumes_ctx.cleanup() - - mock_cleanup.assert_called_once_with( - names=["cinder.volumes"], - users=self.context["users"], - api_versions=api_version, - superclass=volumes_ctx.__class__, - task_id=self.context["owner_id"]) diff --git a/tests/unit/plugins/openstack/context/cleanup/__init__.py b/tests/unit/plugins/openstack/context/cleanup/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/context/cleanup/test_admin.py b/tests/unit/plugins/openstack/context/cleanup/test_admin.py deleted file mode 100644 index 692662ce2c..0000000000 --- a/tests/unit/plugins/openstack/context/cleanup/test_admin.py +++ /dev/null @@ -1,137 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ddt -import mock - -from rally.common import utils -from rally.plugins.openstack.context.cleanup import admin -from rally.plugins.openstack import scenario -from rally.task import context -from tests.unit import test - - -ADMIN = "rally.plugins.openstack.context.cleanup.admin" -BASE = "rally.plugins.openstack.context.cleanup.base" - - -@ddt.ddt -class AdminCleanupTestCase(test.TestCase): - - @mock.patch("%s.manager" % BASE) - @ddt.data((["a", "b"], True), - (["a", "e"], False), - (3, False)) - @ddt.unpack - def test_validate(self, config, valid, mock_manager): - mock_manager.list_resource_names.return_value = {"a", "b", "c"} - results = context.Context.validate( - "admin_cleanup", None, None, config, allow_hidden=True) - if valid: - self.assertEqual([], results) - else: - self.assertGreater(len(results), 0) - - @mock.patch("rally.common.plugin.discover.itersubclasses") - @mock.patch("%s.manager.find_resource_managers" % ADMIN, - return_value=[mock.MagicMock(), mock.MagicMock()]) - @mock.patch("%s.manager.SeekAndDestroy" % ADMIN) - def test_cleanup(self, mock_seek_and_destroy, mock_find_resource_managers, - mock_itersubclasses): - class ResourceClass(utils.RandomNameGeneratorMixin): - pass - - mock_itersubclasses.return_value = [ResourceClass] - - ctx = { - "config": {"admin_cleanup": ["a", "b"]}, - "admin": mock.MagicMock(), - "users": mock.MagicMock(), - "task": {"uuid": "task_id"} - } - - admin_cleanup = admin.AdminCleanup(ctx) - admin_cleanup.setup() - admin_cleanup.cleanup() - - mock_itersubclasses.assert_called_once_with(scenario.OpenStackScenario) - mock_find_resource_managers.assert_called_once_with(("a", "b"), True) - mock_seek_and_destroy.assert_has_calls([ - mock.call(mock_find_resource_managers.return_value[0], - ctx["admin"], - ctx["users"], - api_versions=None, - resource_classes=[ResourceClass], - task_id="task_id"), - mock.call().exterminate(), - mock.call(mock_find_resource_managers.return_value[1], - ctx["admin"], - ctx["users"], - api_versions=None, - resource_classes=[ResourceClass], - task_id="task_id"), - mock.call().exterminate() - ]) - - @mock.patch("rally.common.plugin.discover.itersubclasses") - @mock.patch("%s.manager.find_resource_managers" % ADMIN, - return_value=[mock.MagicMock(), mock.MagicMock()]) - @mock.patch("%s.manager.SeekAndDestroy" % ADMIN) - def test_cleanup_admin_with_api_versions(self, - mock_seek_and_destroy, - mock_find_resource_managers, - mock_itersubclasses): - class ResourceClass(utils.RandomNameGeneratorMixin): - pass - - mock_itersubclasses.return_value = [ResourceClass] - - ctx = { - "config": - {"admin_cleanup": ["a", "b"], - "api_versions": - {"cinder": - {"version": "1", - "service_type": "volume" - } - } - }, - "admin": mock.MagicMock(), - "users": mock.MagicMock(), - "task": mock.MagicMock() - } - - admin_cleanup = admin.AdminCleanup(ctx) - admin_cleanup.setup() - admin_cleanup.cleanup() - - mock_itersubclasses.assert_called_once_with(scenario.OpenStackScenario) - mock_find_resource_managers.assert_called_once_with(("a", "b"), True) - mock_seek_and_destroy.assert_has_calls([ - mock.call(mock_find_resource_managers.return_value[0], - ctx["admin"], - ctx["users"], - api_versions=ctx["config"]["api_versions"], - resource_classes=[ResourceClass], - task_id=ctx["task"]["uuid"]), - mock.call().exterminate(), - mock.call(mock_find_resource_managers.return_value[1], - ctx["admin"], - ctx["users"], - api_versions=ctx["config"]["api_versions"], - resource_classes=[ResourceClass], - task_id=ctx["task"]["uuid"]), - 
mock.call().exterminate() - ]) diff --git a/tests/unit/plugins/openstack/context/cleanup/test_user.py b/tests/unit/plugins/openstack/context/cleanup/test_user.py deleted file mode 100644 index 95baf3e77b..0000000000 --- a/tests/unit/plugins/openstack/context/cleanup/test_user.py +++ /dev/null @@ -1,133 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import mock - -from rally.common import utils -from rally.plugins.openstack.context.cleanup import user -from rally.plugins.openstack import scenario -from rally.task import context -from tests.unit import test - - -ADMIN = "rally.plugins.openstack.context.cleanup.admin" -BASE = "rally.plugins.openstack.context.cleanup.base" - - -@ddt.ddt -class UserCleanupTestCase(test.TestCase): - - @mock.patch("%s.manager" % BASE) - @ddt.data((["a", "b"], True), - (["a", "e"], False), - (3, False)) - @ddt.unpack - def test_validate(self, config, valid, mock_manager): - mock_manager.list_resource_names.return_value = {"a", "b", "c"} - results = context.Context.validate( - "cleanup", None, None, config, allow_hidden=True) - if valid: - self.assertEqual([], results) - else: - self.assertGreater(len(results), 0) - - @mock.patch("rally.common.plugin.discover.itersubclasses") - @mock.patch("%s.manager.find_resource_managers" % ADMIN, - return_value=[mock.MagicMock(), mock.MagicMock()]) - @mock.patch("%s.manager.SeekAndDestroy" % ADMIN) - def test_cleanup(self, mock_seek_and_destroy, mock_find_resource_managers, - mock_itersubclasses): - - class ResourceClass(utils.RandomNameGeneratorMixin): - pass - - mock_itersubclasses.return_value = [ResourceClass] - - ctx = { - "config": {"cleanup": ["a", "b"]}, - "users": mock.MagicMock(), - "task": {"uuid": "task_id"} - } - - admin_cleanup = user.UserCleanup(ctx) - admin_cleanup.setup() - admin_cleanup.cleanup() - - mock_itersubclasses.assert_called_once_with(scenario.OpenStackScenario) - mock_find_resource_managers.assert_called_once_with(("a", "b"), False) - mock_seek_and_destroy.assert_has_calls([ - mock.call(mock_find_resource_managers.return_value[0], - None, ctx["users"], api_versions=None, - resource_classes=[ResourceClass], task_id="task_id"), - mock.call().exterminate(), - mock.call(mock_find_resource_managers.return_value[1], - None, ctx["users"], api_versions=None, - resource_classes=[ResourceClass], task_id="task_id"), - mock.call().exterminate() - ]) - - @mock.patch("rally.common.plugin.discover.itersubclasses") - @mock.patch("%s.manager.find_resource_managers" % ADMIN, - return_value=[mock.MagicMock(), mock.MagicMock()]) - @mock.patch("%s.manager.SeekAndDestroy" % ADMIN) - def test_cleanup_user_with_api_versions( - self, - mock_seek_and_destroy, - mock_find_resource_managers, - mock_itersubclasses): - - class ResourceClass(utils.RandomNameGeneratorMixin): - pass - - mock_itersubclasses.return_value = [ResourceClass] - - ctx = { - "config": - {"admin_cleanup": ["a", "b"], - "api_versions": - {"cinder": - {"version": "1", - 
"service_type": "volume" - } - } - }, - "admin": mock.MagicMock(), - "users": mock.MagicMock(), - "task": {"uuid": "task_id"} - } - - user_cleanup = user.UserCleanup(ctx) - user_cleanup.setup() - user_cleanup.cleanup() - - mock_itersubclasses.assert_called_once_with(scenario.OpenStackScenario) - mock_find_resource_managers.assert_called_once_with({}, False) - mock_seek_and_destroy.assert_has_calls([ - mock.call(mock_find_resource_managers.return_value[0], - None, - ctx["users"], - api_versions=ctx["config"]["api_versions"], - resource_classes=[ResourceClass], - task_id="task_id"), - mock.call().exterminate(), - mock.call(mock_find_resource_managers.return_value[1], - None, - ctx["users"], - api_versions=ctx["config"]["api_versions"], - resource_classes=[ResourceClass], - task_id="task_id"), - mock.call().exterminate() - ]) diff --git a/tests/unit/plugins/openstack/context/dataplane/__init__.py b/tests/unit/plugins/openstack/context/dataplane/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/context/dataplane/test_heat.py b/tests/unit/plugins/openstack/context/dataplane/test_heat.py deleted file mode 100644 index 9a549a2eec..0000000000 --- a/tests/unit/plugins/openstack/context/dataplane/test_heat.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright 2016: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import functools - -import mock - -from rally.plugins.openstack.context import dataplane -from tests.unit import test - -MOD = "rally.plugins.openstack.context.dataplane.heat." 
- - -class TestHeatWorkload(test.ScenarioTestCase): - - @mock.patch(MOD + "pkgutil") - def test_get_data_resource(self, mock_pkgutil): - mock_pkgutil.get_data.return_value = "fake_data" - data = dataplane.heat.get_data([1, 2]) - self.assertEqual("fake_data", data) - mock_pkgutil.get_data.assert_called_once_with(1, 2) - - @mock.patch(MOD + "open") - def test_get_data_file(self, mock_open): - data = dataplane.heat.get_data(1) - self.assertEqual(mock_open.return_value.read.return_value, data) - mock_open.assert_called_once_with(1) - - def test__get_context_parameter(self): - user = [1, 2] - tenant = [3, 4, {"one": 1}] - self.context["tenants"] = {1: tenant} - ctx = dataplane.heat.HeatDataplane(self.context) - gcp = functools.partial(ctx._get_context_parameter, user, 1) - self.assertEqual(1, gcp("user.0")) - self.assertEqual(2, gcp("user.1")) - self.assertEqual(3, gcp("tenant.0")) - self.assertEqual(1, gcp("tenant.2.one")) - - @mock.patch(MOD + "osclients.Clients") - def test__get_public_network_id(self, mock_clients): - fake_net = {"id": "fake_id"} - fake_nc = mock.Mock(name="fake_neutronclient") - fake_nc.list_networks.return_value = {"networks": [fake_net]} - mock_clients.neutron.return_value = fake_nc - mock_clients.return_value = mock.Mock( - neutron=mock.Mock(return_value=fake_nc)) - self.context["admin"] = {"credential": "fake_credential"} - ctx = dataplane.heat.HeatDataplane(self.context) - network_id = ctx._get_public_network_id() - self.assertEqual("fake_id", network_id) - mock_clients.assert_called_once_with("fake_credential") - - @mock.patch(MOD + "get_data") - @mock.patch(MOD + "HeatDataplane._get_context_parameter") - @mock.patch(MOD + "heat_utils") - def test_setup(self, - mock_heat_utils, - mock_heat_dataplane__get_context_parameter, - mock_get_data): - self.context.update({ - "config": { - "heat_dataplane": { - "stacks_per_tenant": 1, - "template": "tpl.yaml", - "files": {"file1": "f1.yaml", "file2": "f2.yaml"}, - "parameters": {"key": "value"}, - "context_parameters": {"ctx.key": "ctx.value"}, - } - }, - "users": [{"tenant_id": "t1", "keypair": {"name": "kp1"}}, ], - "tenants": {"t1": {"networks": [{"router_id": "rid"}]}}, - }) - mock_heat_dataplane__get_context_parameter.return_value = "gcp" - mock_get_data.side_effect = ["tpl", "sf1", "sf2"] - ctx = dataplane.heat.HeatDataplane(self.context) - ctx._get_public_network_id = mock.Mock(return_value="fake_net") - ctx.setup() - workloads = self.context["tenants"]["t1"]["stack_dataplane"] - self.assertEqual(1, len(workloads)) - wl = workloads[0] - fake_scenario = mock_heat_utils.HeatScenario.return_value - self.assertEqual(fake_scenario._create_stack.return_value.id, wl[0]) - self.assertEqual("tpl", wl[1]) - self.assertIn("sf1", wl[2].values()) - self.assertIn("sf2", wl[2].values()) - expected = { - "ctx.key": "gcp", - "key": "value", - "key_name": "kp1", - "network_id": "fake_net", - "router_id": "rid"} - self.assertEqual(expected, wl[3]) diff --git a/tests/unit/plugins/openstack/context/designate/__init__.py b/tests/unit/plugins/openstack/context/designate/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/context/designate/test_zones.py b/tests/unit/plugins/openstack/context/designate/test_zones.py deleted file mode 100644 index 76b2b218ae..0000000000 --- a/tests/unit/plugins/openstack/context/designate/test_zones.py +++ /dev/null @@ -1,132 +0,0 @@ -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import copy - -import mock - -from rally.plugins.openstack.context.designate import zones -from rally.plugins.openstack.scenarios.designate import utils -from tests.unit import test - -CTX = "rally.plugins.openstack.context" -SCN = "rally.plugins.openstack.scenarios" - - -class ZoneGeneratorTestCase(test.ScenarioTestCase): - - def _gen_tenants(self, count): - tenants = {} - for id_ in range(count): - tenants[str(id_)] = {"name": str(id_)} - return tenants - - def test_init(self): - self.context.update({ - "config": { - "zones": { - "zones_per_tenant": 5, - } - } - }) - - inst = zones.ZoneGenerator(self.context) - self.assertEqual(inst.config, self.context["config"]["zones"]) - - @mock.patch("%s.designate.utils.DesignateScenario._create_zone" % SCN, - return_value={"id": "uuid"}) - def test_setup(self, mock_designate_scenario__create_zone): - tenants_count = 2 - users_per_tenant = 5 - zones_per_tenant = 5 - - tenants = self._gen_tenants(tenants_count) - users = [] - for id_ in tenants.keys(): - for i in range(users_per_tenant): - users.append({"id": i, "tenant_id": id_, - "credential": mock.MagicMock()}) - - self.context.update({ - "config": { - "users": { - "tenants": 2, - "users_per_tenant": 5, - "concurrent": 10, - }, - "zones": { - "zones_per_tenant": zones_per_tenant, - } - }, - "admin": { - "credential": mock.MagicMock() - }, - "users": users, - "tenants": tenants - }) - - new_context = copy.deepcopy(self.context) - for id_ in tenants.keys(): - new_context["tenants"][id_].setdefault("zones", []) - for i in range(zones_per_tenant): - new_context["tenants"][id_]["zones"].append({"id": "uuid"}) - - zones_ctx = zones.ZoneGenerator(self.context) - zones_ctx.setup() - self.assertEqual(new_context, self.context) - - @mock.patch("%s.designate.zones.resource_manager.cleanup" % CTX) - def test_cleanup(self, mock_cleanup): - - tenants_count = 2 - users_per_tenant = 5 - zones_per_tenant = 5 - - tenants = self._gen_tenants(tenants_count) - users = [] - for id_ in tenants.keys(): - for i in range(users_per_tenant): - users.append({"id": i, "tenant_id": id_, - "endpoint": "endpoint"}) - tenants[id_].setdefault("zones", []) - for j in range(zones_per_tenant): - tenants[id_]["zones"].append({"id": "uuid"}) - - self.context.update({ - "config": { - "users": { - "tenants": 2, - "users_per_tenant": 5, - "concurrent": 10, - }, - "zones": { - "zones_per_tenant": 5, - } - }, - "admin": { - "endpoint": mock.MagicMock() - }, - "users": users, - "tenants": tenants - }) - - zones_ctx = zones.ZoneGenerator(self.context) - zones_ctx.cleanup() - - mock_cleanup.assert_called_once_with( - names=["designate.zones"], - users=self.context["users"], - superclass=utils.DesignateScenario, - task_id=self.context["owner_id"]) diff --git a/tests/unit/plugins/openstack/context/ec2/__init__.py b/tests/unit/plugins/openstack/context/ec2/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/context/ec2/test_servers.py 
b/tests/unit/plugins/openstack/context/ec2/test_servers.py deleted file mode 100644 index ad1c7a07e7..0000000000 --- a/tests/unit/plugins/openstack/context/ec2/test_servers.py +++ /dev/null @@ -1,110 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy - -import mock - -from rally.plugins.openstack.context.ec2 import servers -from rally.plugins.openstack.scenarios.ec2 import utils as ec2_utils -from tests.unit import fakes -from tests.unit import test - -CTX = "rally.plugins.openstack.context.ec2" -SCN = "rally.plugins.openstack.scenarios" -TYP = "rally.plugins.openstack.types" - - -class EC2ServerGeneratorTestCase(test.TestCase): - - def _gen_tenants_and_users(self, tenants_count, users_per_tenant): - tenants = {} - for id in range(tenants_count): - tenants[str(id)] = dict(name=str(id)) - - users = [] - for tenant_id in tenants.keys(): - for i in range(users_per_tenant): - users.append({"id": i, "tenant_id": tenant_id, - "credential": mock.MagicMock()}) - return tenants, users - - def _get_context(self, users, tenants): - return { - "config": { - "users": { - "tenants": 2, - "users_per_tenant": 5, - "concurrent": 10}, - "ec2_servers": { - "servers_per_tenant": 5, - "image": {"name": "foo_image"}, - "flavor": {"name": "foo_flavor"} - } - }, - "admin": {"credential": mock.MagicMock()}, - "task": mock.MagicMock(), - "owner_id": "foo_uuid", - "users": users, - "tenants": tenants - } - - @mock.patch("%s.ec2.utils.EC2Scenario._boot_servers" % SCN, - return_value=[fakes.FakeServer(id=str(i)) for i in range(5)]) - @mock.patch("%s.EC2Image" % TYP) - def test_setup(self, mock_ec2_image, mock_ec2_scenario__boot_servers): - - tenants_count = 2 - users_per_tenant = 5 - servers_per_tenant = 5 - - tenants, users = self._gen_tenants_and_users(tenants_count, - users_per_tenant) - - real_context = self._get_context(users, tenants) - - new_context = copy.deepcopy(real_context) - for tenant_id in new_context["tenants"]: - new_context["tenants"][tenant_id].setdefault("ec2_servers", []) - for i in range(servers_per_tenant): - new_context["tenants"][tenant_id]["ec2_servers"].append(str(i)) - - servers_ctx = servers.EC2ServerGenerator(real_context) - servers_ctx.setup() - self.assertEqual(new_context, servers_ctx.context) - - @mock.patch("%s.servers.resource_manager.cleanup" % CTX) - def test_cleanup(self, mock_cleanup): - - tenants_count = 2 - users_per_tenant = 5 - servers_per_tenant = 5 - - tenants, users = self._gen_tenants_and_users(tenants_count, - users_per_tenant) - for tenant_id in tenants.keys(): - tenants[tenant_id].setdefault("ec2_servers", []) - for i in range(servers_per_tenant): - tenants[tenant_id]["ec2_servers"].append(str(i)) - - context = self._get_context(users, tenants) - - servers_ctx = servers.EC2ServerGenerator(context) - servers_ctx.cleanup() - - mock_cleanup.assert_called_once_with( - names=["ec2.servers"], - users=context["users"], - superclass=ec2_utils.EC2Scenario, - task_id="foo_uuid") diff --git 
a/tests/unit/plugins/openstack/context/glance/__init__.py b/tests/unit/plugins/openstack/context/glance/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/context/glance/test_images.py b/tests/unit/plugins/openstack/context/glance/test_images.py deleted file mode 100644 index 1fc0af9273..0000000000 --- a/tests/unit/plugins/openstack/context/glance/test_images.py +++ /dev/null @@ -1,288 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import copy - -import ddt -import mock - -from rally.plugins.openstack.context.glance import images -from tests.unit import test - -CTX = "rally.plugins.openstack.context.glance.images" -SCN = "rally.plugins.openstack.scenarios.glance" - - -@ddt.ddt -class ImageGeneratorTestCase(test.ScenarioTestCase): - - tenants_num = 1 - users_per_tenant = 5 - users_num = tenants_num * users_per_tenant - threads = 10 - - def setUp(self): - super(ImageGeneratorTestCase, self).setUp() - self.context.update({ - "config": { - "users": { - "tenants": self.tenants_num, - "users_per_tenant": self.users_per_tenant, - "resource_management_workers": self.threads, - } - }, - "admin": {"credential": mock.MagicMock()}, - "users": [], - "task": {"uuid": "task_id"} - }) - patch = mock.patch( - "rally.plugins.openstack.services.image.image.Image") - self.addCleanup(patch.stop) - self.mock_image = patch.start() - - def _gen_tenants(self, count): - tenants = {} - for id_ in range(count): - tenants[str(id_)] = {"name": str(id_)} - return tenants - - @ddt.data( - {}, - {"min_disk": 1, "min_ram": 2}, - {"image_name": "foo"}, - {"tenants": 3, "users_per_tenant": 2, "images_per_tenant": 5}, - {"api_versions": {"glance": {"version": 2, "service_type": "image"}}}) - @ddt.unpack - @mock.patch("rally.plugins.openstack.osclients.Clients") - def test_setup(self, mock_clients, - container_format="bare", disk_format="qcow2", - image_url="http://example.com/fake/url", - tenants=1, users_per_tenant=1, images_per_tenant=1, - image_name=None, min_ram=None, min_disk=None, - api_versions=None, visibility="public"): - image_service = self.mock_image.return_value - - tenant_data = self._gen_tenants(tenants) - users = [] - for tenant_id in tenant_data: - for i in range(users_per_tenant): - users.append({"id": i, "tenant_id": tenant_id, - "credential": mock.MagicMock()}) - - self.context.update({ - "config": { - "users": { - "tenants": tenants, - "users_per_tenant": users_per_tenant, - "concurrent": 10, - }, - "images": { - "image_url": image_url, - "disk_format": disk_format, - "container_format": container_format, - "images_per_tenant": images_per_tenant, - "visibility": visibility, - } - }, - "admin": { - "credential": mock.MagicMock() - }, - "users": users, - "tenants": tenant_data - }) - if api_versions: - self.context["config"]["api_versions"] = api_versions - - expected_image_args = {} - if image_name is not None: - self.context["config"]["images"]["image_name"] = image_name - if min_ram is not None: - 
self.context["config"]["images"]["min_ram"] = min_ram - expected_image_args["min_ram"] = min_ram - if min_disk is not None: - self.context["config"]["images"]["min_disk"] = min_disk - expected_image_args["min_disk"] = min_disk - - new_context = copy.deepcopy(self.context) - for tenant_id in new_context["tenants"].keys(): - new_context["tenants"][tenant_id]["images"] = [ - image_service.create_image.return_value.id - ] * images_per_tenant - - images_ctx = images.ImageGenerator(self.context) - images_ctx.setup() - self.assertEqual(new_context, self.context) - - wrapper_calls = [] - wrapper_calls.extend([mock.call(mock_clients.return_value.glance, - images_ctx)] * tenants) - wrapper_calls.extend( - [mock.call().create_image( - container_format, image_url, disk_format, - name=mock.ANY, **expected_image_args)] * - tenants * images_per_tenant) - - mock_clients.assert_has_calls( - [mock.call(mock.ANY, api_info=api_versions)] * tenants) - - @mock.patch("%s.image.Image" % CTX) - @mock.patch("%s.LOG" % CTX) - def test_setup_with_deprecated_args(self, mock_log, mock_image): - image_type = "itype" - image_container = "icontainer" - is_public = True - d_min_ram = mock.Mock() - d_min_disk = mock.Mock() - self.context.update({ - "config": { - "images": {"image_type": image_type, - "image_container": image_container, - "image_args": {"is_public": is_public, - "min_ram": d_min_ram, - "min_disk": d_min_disk}} - }, - "users": [{"tenant_id": "foo-tenant", - "credential": mock.MagicMock()}], - "tenants": {"foo-tenant": {}} - }) - images_ctx = images.ImageGenerator(self.context) - images_ctx.setup() - - mock_image.return_value.create_image.assert_called_once_with( - image_name=None, - container_format=image_container, - image_location=None, - disk_format=image_type, - visibility="public", - min_disk=d_min_disk, - min_ram=d_min_ram - ) - expected_warns = [ - mock.call("The 'image_type' argument is deprecated since " - "Rally 0.10.0, use disk_format argument instead"), - mock.call("The 'image_container' argument is deprecated since " - "Rally 0.10.0; use container_format argument instead"), - mock.call("The 'image_args' argument is deprecated since " - "Rally 0.10.0; specify arguments in a root " - "section of context instead")] - - self.assertEqual(expected_warns, mock_log.warning.call_args_list) - - mock_image.return_value.create_image.reset_mock() - mock_log.warning.reset_mock() - - min_ram = mock.Mock() - min_disk = mock.Mock() - visibility = "foo" - disk_format = "dformat" - container_format = "cformat" - - self.context["config"]["images"].update({ - "min_ram": min_ram, - "min_disk": min_disk, - "visibility": visibility, - "disk_format": disk_format, - "container_format": container_format - }) - - images_ctx = images.ImageGenerator(self.context) - images_ctx.setup() - - # check that deprecated arguments are not used - mock_image.return_value.create_image.assert_called_once_with( - image_name=None, - container_format=container_format, - image_location=None, - disk_format=disk_format, - visibility=visibility, - min_disk=min_disk, - min_ram=min_ram - ) - # No matter will be deprecated arguments used or not, if they are - # specified, warning message should be printed. 
- self.assertEqual(expected_warns, mock_log.warning.call_args_list) - - @ddt.data( - {"admin": True}, - {"api_versions": {"glance": {"version": 2, "service_type": "image"}}}) - @ddt.unpack - @mock.patch("%s.resource_manager.cleanup" % CTX) - def test_cleanup(self, mock_cleanup, admin=None, api_versions=None): - images_per_tenant = 5 - - tenants = self._gen_tenants(self.tenants_num) - users = [] - created_images = [] - for tenant_id in tenants: - for i in range(self.users_per_tenant): - users.append({"id": i, "tenant_id": tenant_id, - "credential": mock.MagicMock()}) - tenants[tenant_id].setdefault("images", []) - for j in range(images_per_tenant): - image = mock.Mock() - created_images.append(image) - tenants[tenant_id]["images"].append(image) - - self.context.update({ - "config": { - "users": { - "tenants": self.tenants_num, - "users_per_tenant": self.users_per_tenant, - "concurrent": 10, - }, - "images": {}, - "api_versions": api_versions - }, - "users": mock.Mock() - }) - - if admin: - self.context["admin"] = {"credential": mock.MagicMock()} - else: - # ensure that there is no admin - self.context.pop("admin") - - images_ctx = images.ImageGenerator(self.context) - images_ctx.cleanup() - mock_cleanup.assert_called_once_with( - names=["glance.images", "cinder.image_volumes_cache"], - admin=self.context.get("admin"), - admin_required=None if admin else False, - users=self.context["users"], - api_versions=api_versions, - superclass=images_ctx.__class__, - task_id=self.context["owner_id"]) - - @mock.patch("%s.rutils.make_name_matcher" % CTX) - @mock.patch("%s.resource_manager.cleanup" % CTX) - def test_cleanup_for_predefined_name(self, mock_cleanup, - mock_make_name_matcher): - self.context.update({ - "config": { - "images": {"image_name": "foo"} - }, - "users": mock.Mock() - }) - - images_ctx = images.ImageGenerator(self.context) - images_ctx.cleanup() - mock_cleanup.assert_called_once_with( - names=["glance.images", "cinder.image_volumes_cache"], - admin=self.context.get("admin"), - admin_required=None, - users=self.context["users"], - api_versions=None, - superclass=mock_make_name_matcher.return_value, - task_id=self.context["owner_id"]) diff --git a/tests/unit/plugins/openstack/context/heat/__init__.py b/tests/unit/plugins/openstack/context/heat/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/context/heat/test_stacks.py b/tests/unit/plugins/openstack/context/heat/test_stacks.py deleted file mode 100644 index 1a5e1c7a72..0000000000 --- a/tests/unit/plugins/openstack/context/heat/test_stacks.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
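A generic illustration of the ddt-driven parametrization pattern used in several of the deleted test modules above (for example the glance ImageGenerator setup test): each dict passed to @ddt.data is unpacked into keyword arguments, and the test signature supplies defaults for anything a case does not override. This is a minimal standalone sketch, assuming only that the ddt library imported by these tests is installed; the class and method names are illustrative, not Rally code.

import unittest

import ddt


@ddt.ddt
class ExampleParametrizedTestCase(unittest.TestCase):

    @ddt.data(
        {},
        {"min_disk": 1, "min_ram": 2},
        {"images_per_tenant": 5})
    @ddt.unpack
    def test_defaults_and_overrides(self, min_disk=0, min_ram=0,
                                    images_per_tenant=1):
        # every generated case sees a fully populated set of arguments:
        # values from the data dict where given, signature defaults otherwise
        self.assertGreaterEqual(min_disk, 0)
        self.assertGreaterEqual(min_ram, 0)
        self.assertGreaterEqual(images_per_tenant, 1)


if __name__ == "__main__":
    unittest.main()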
- -import mock - -from rally.plugins.openstack.context.heat import stacks -from rally.plugins.openstack.scenarios.heat import utils as heat_utils -from tests.unit import fakes -from tests.unit import test - -CTX = "rally.plugins.openstack.context" -SCN = "rally.plugins.openstack.scenarios" - - -class TestStackGenerator(test.ScenarioTestCase): - - def _gen_tenants(self, count): - tenants = {} - for id_ in range(count): - tenants[str(id_)] = dict(name=str(id_)) - return tenants - - def test_init(self): - self.context.update({ - "config": { - "stacks": { - "stacks_per_tenant": 1, - "resources_per_stack": 1 - } - } - }) - - inst = stacks.StackGenerator(self.context) - self.assertEqual(inst.config, self.context["config"]["stacks"]) - - @mock.patch("%s.heat.utils.HeatScenario._create_stack" % SCN, - return_value=fakes.FakeStack(id="uuid")) - def test_setup(self, mock_heat_scenario__create_stack): - tenants_count = 2 - users_per_tenant = 5 - stacks_per_tenant = 1 - - tenants = self._gen_tenants(tenants_count) - users = [] - for ten_id in tenants: - for i in range(users_per_tenant): - users.append({"id": i, "tenant_id": ten_id, - "credential": mock.MagicMock()}) - - self.context.update({ - "config": { - "users": { - "tenants": tenants_count, - "users_per_tenant": users_per_tenant, - "concurrent": 10, - }, - "stacks": { - "stacks_per_tenant": stacks_per_tenant, - "resources_per_stack": 1 - } - }, - "users": users, - "tenants": tenants - }) - - stack_ctx = stacks.StackGenerator(self.context) - stack_ctx.setup() - self.assertEqual(tenants_count * stacks_per_tenant, - mock_heat_scenario__create_stack.call_count) - # check that stack ids have been saved in context - for ten_id in self.context["tenants"].keys(): - self.assertEqual(stacks_per_tenant, - len(self.context["tenants"][ten_id]["stacks"])) - - @mock.patch("%s.heat.stacks.resource_manager.cleanup" % CTX) - def test_cleanup(self, mock_cleanup): - self.context.update({ - "users": mock.MagicMock() - }) - stack_ctx = stacks.StackGenerator(self.context) - stack_ctx.cleanup() - mock_cleanup.assert_called_once_with( - names=["heat.stacks"], - users=self.context["users"], - superclass=heat_utils.HeatScenario, - task_id=self.context["owner_id"]) diff --git a/tests/unit/plugins/openstack/context/keystone/__init__.py b/tests/unit/plugins/openstack/context/keystone/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/context/keystone/test_roles.py b/tests/unit/plugins/openstack/context/keystone/test_roles.py deleted file mode 100644 index 0272a865ba..0000000000 --- a/tests/unit/plugins/openstack/context/keystone/test_roles.py +++ /dev/null @@ -1,150 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from rally import exceptions -from rally.plugins.openstack.context.keystone import roles -from tests.unit import fakes -from tests.unit import test - -CTX = "rally.plugins.openstack.context.keystone.roles" - - -class RoleGeneratorTestCase(test.TestCase): - - def create_default_roles_and_patch_add_remove_functions(self, fc): - fc.keystone().roles.add_user_role = mock.MagicMock() - fc.keystone().roles.remove_user_role = mock.MagicMock() - fc.keystone().roles.create("r1", "test_role1") - fc.keystone().roles.create("r2", "test_role2") - self.assertEqual(2, len(fc.keystone().roles.list())) - - @property - def context(self): - return { - "config": { - "roles": [ - "test_role1", - "test_role2" - ] - }, - "admin": {"credential": mock.MagicMock()}, - "task": mock.MagicMock() - } - - @mock.patch("%s.osclients" % CTX) - def test_add_role(self, mock_osclients): - fc = fakes.FakeClients() - mock_osclients.Clients.return_value = fc - self.create_default_roles_and_patch_add_remove_functions(fc) - - ctx = roles.RoleGenerator(self.context) - ctx.context["users"] = [{"id": "u1", "tenant_id": "t1"}, - {"id": "u2", "tenant_id": "t2"}] - ctx.credential = mock.MagicMock() - ctx.setup() - - expected = {"r1": "test_role1", "r2": "test_role2"} - self.assertEqual(expected, ctx.context["roles"]) - - @mock.patch("%s.osclients" % CTX) - def test_add_role_which_does_not_exist(self, mock_osclients): - fc = fakes.FakeClients() - mock_osclients.Clients.return_value = fc - self.create_default_roles_and_patch_add_remove_functions(fc) - - ctx = roles.RoleGenerator(self.context) - ctx.context["users"] = [{"id": "u1", "tenant_id": "t1"}, - {"id": "u2", "tenant_id": "t2"}] - ctx.config = ["unknown_role"] - ctx.credential = mock.MagicMock() - ex = self.assertRaises(exceptions.NotFoundException, - ctx._get_role_object, "unknown_role") - - expected = ("The resource can not be found: There is no role " - "with name `unknown_role`") - self.assertEqual(expected, str(ex)) - - @mock.patch("%s.osclients" % CTX) - def test_remove_role(self, mock_osclients): - fc = fakes.FakeClients() - mock_osclients.Clients.return_value = fc - self.create_default_roles_and_patch_add_remove_functions(fc) - - ctx = roles.RoleGenerator(self.context) - ctx.context["roles"] = {"r1": "test_role1", - "r2": "test_role2"} - ctx.context["users"] = [{"id": "u1", "tenant_id": "t1", - "assigned_roles": ["r1", "r2"]}, - {"id": "u2", "tenant_id": "t2", - "assigned_roles": ["r1", "r2"]}] - ctx.credential = mock.MagicMock() - ctx.cleanup() - calls = [ - mock.call(user="u1", role="r1", tenant="t1"), - mock.call(user="u2", role="r1", tenant="t2"), - mock.call(user="u1", role="r2", tenant="t1"), - mock.call(user="u2", role="r2", tenant="t2") - ] - - fc.keystone().roles.remove_user_role.assert_has_calls(calls, - any_order=True) - - @mock.patch("%s.osclients" % CTX) - def test_setup_and_cleanup(self, mock_osclients): - fc = fakes.FakeClients() - mock_osclients.Clients.return_value = fc - self.create_default_roles_and_patch_add_remove_functions(fc) - - def _get_user_role_ids_side_effect(user_id, project_id): - return ["r1", "r2"] if user_id == "u3" else [] - - with roles.RoleGenerator(self.context) as ctx: - ctx.context["users"] = [{"id": "u1", "tenant_id": "t1"}, - {"id": "u2", "tenant_id": "t2"}, - {"id": "u3", "tenant_id": "t3"}] - - ctx._get_user_role_ids = mock.MagicMock() - ctx._get_user_role_ids.side_effect = _get_user_role_ids_side_effect - ctx.setup() - ctx.credential = mock.MagicMock() - calls = [ - mock.call(user="u1", role="r1", tenant="t1"), - 
mock.call(user="u2", role="r1", tenant="t2"), - mock.call(user="u1", role="r2", tenant="t1"), - mock.call(user="u2", role="r2", tenant="t2"), - ] - fc.keystone().roles.add_user_role.assert_has_calls(calls, - any_order=True) - self.assertEqual( - 4, fc.keystone().roles.add_user_role.call_count) - self.assertEqual( - 0, fc.keystone().roles.remove_user_role.call_count) - self.assertEqual(2, len(ctx.context["roles"])) - self.assertEqual(2, len(fc.keystone().roles.list())) - - # Cleanup (called by context manager) - self.assertEqual(2, len(fc.keystone().roles.list())) - self.assertEqual(4, fc.keystone().roles.add_user_role.call_count) - self.assertEqual(4, fc.keystone().roles.remove_user_role.call_count) - calls = [ - mock.call(user="u1", role="r1", tenant="t1"), - mock.call(user="u2", role="r1", tenant="t2"), - mock.call(user="u1", role="r2", tenant="t1"), - mock.call(user="u2", role="r2", tenant="t2") - ] - fc.keystone().roles.remove_user_role.assert_has_calls(calls, - any_order=True) diff --git a/tests/unit/plugins/openstack/context/keystone/test_users.py b/tests/unit/plugins/openstack/context/keystone/test_users.py deleted file mode 100644 index 47553dab7b..0000000000 --- a/tests/unit/plugins/openstack/context/keystone/test_users.py +++ /dev/null @@ -1,531 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from rally import consts -from rally import exceptions -from rally.plugins.openstack.context.keystone import users -from rally.plugins.openstack import credential as oscredential -from tests.unit import test - -CTX = "rally.plugins.openstack.context.keystone.users" - - -class UserGeneratorBaseTestCase(test.ScenarioTestCase): - def setUp(self): - super(UserGeneratorBaseTestCase, self).setUp() - self.osclients_patcher = mock.patch("%s.osclients" % CTX) - self.osclients = self.osclients_patcher.start() - self.addCleanup(self.osclients_patcher.stop) - - self.deployment_uuid = "deployment_id" - - self.admin_cred = { - "username": "root", "password": "qwerty", - "auth_url": "https://example.com", - "project_domain_name": "foo", - "user_domain_name": "bar"} - - self.platforms = { - "openstack": { - "admin": self.admin_cred, - "users": [] - } - } - - self.context.update({ - "config": {"users": {}}, - "env": {"platforms": self.platforms}, - "task": {"uuid": "task_id", - "deployment_uuid": self.deployment_uuid} - }) - - def test___init__for_new_users(self): - self.context["config"]["users"] = { - "tenants": 1, "users_per_tenant": 1, - "resource_management_workers": 1} - - user_generator = users.UserGenerator(self.context) - - self.assertEqual([], user_generator.existing_users) - self.assertEqual(self.admin_cred["project_domain_name"], - user_generator.config["project_domain"]) - self.assertEqual(self.admin_cred["user_domain_name"], - user_generator.config["user_domain"]) - - # the case #2 - existing users are presented in deployment but - # the user forces to create new ones - self.platforms["openstack"]["users"] = [mock.Mock()] - - user_generator = users.UserGenerator(self.context) - - self.assertEqual([], user_generator.existing_users) - self.assertEqual(self.admin_cred["project_domain_name"], - user_generator.config["project_domain"]) - self.assertEqual(self.admin_cred["user_domain_name"], - user_generator.config["user_domain"]) - - def test___init__for_existing_users(self): - foo_user = mock.Mock() - - self.platforms["openstack"]["users"] = [foo_user] - - user_generator = users.UserGenerator(self.context) - - self.assertEqual([foo_user], user_generator.existing_users) - self.assertEqual({"user_choice_method": "random"}, - user_generator.config) - - # the case #2: the config with `user_choice_method` option - self.context["config"]["users"] = {"user_choice_method": "foo"} - - user_generator = users.UserGenerator(self.context) - - self.assertEqual([foo_user], user_generator.existing_users) - self.assertEqual({"user_choice_method": "foo"}, user_generator.config) - - def test_setup(self): - user_generator = users.UserGenerator(self.context) - user_generator.use_existing_users = mock.Mock() - user_generator.create_users = mock.Mock() - - # no existing users -> new users should be created - user_generator.existing_users = [] - - user_generator.setup() - - user_generator.create_users.assert_called_once_with() - self.assertFalse(user_generator.use_existing_users.called) - - user_generator.create_users.reset_mock() - user_generator.use_existing_users.reset_mock() - - # existing_users is not empty -> existing users should be created - user_generator.existing_users = [mock.Mock()] - - user_generator.setup() - - user_generator.use_existing_users.assert_called_once_with() - self.assertFalse(user_generator.create_users.called) - - def test_cleanup(self): - user_generator = users.UserGenerator(self.context) - user_generator._remove_default_security_group = mock.Mock() - 
user_generator._delete_users = mock.Mock() - user_generator._delete_tenants = mock.Mock() - - # In case if existing users nothing should be done - user_generator.existing_users = [mock.Mock] - - user_generator.cleanup() - - self.assertFalse(user_generator._remove_default_security_group.called) - self.assertFalse(user_generator._delete_users.called) - self.assertFalse(user_generator._delete_tenants.called) - - # In case when new users were created, the proper cleanup should be - # performed - user_generator.existing_users = [] - - user_generator.cleanup() - - user_generator._remove_default_security_group.assert_called_once_with() - user_generator._delete_users.assert_called_once_with() - user_generator._delete_tenants.assert_called_once_with() - - -class UserGeneratorForExistingUsersTestCase(test.ScenarioTestCase): - def setUp(self): - super(UserGeneratorForExistingUsersTestCase, self).setUp() - self.osclients_patcher = mock.patch("%s.osclients" % CTX) - self.osclients = self.osclients_patcher.start() - self.addCleanup(self.osclients_patcher.stop) - - self.deployment_uuid = "deployment_id" - - self.platforms = { - "openstack": { - "admin": {"username": "root", - "password": "qwerty", - "auth_url": "https://example.com"}, - "users": [] - } - } - self.context.update({ - "config": {"users": {}}, - "users": [], - "env": {"platforms": self.platforms}, - "task": {"uuid": "task_id", - "deployment_uuid": self.deployment_uuid} - }) - - @mock.patch("%s.credential.OpenStackCredential" % CTX) - @mock.patch("%s.osclients.Clients" % CTX) - def test_use_existing_users(self, mock_clients, - mock_open_stack_credential): - user1 = {"tenant_name": "proj", "username": "usr", - "password": "pswd", "auth_url": "https://example.com"} - user2 = {"tenant_name": "proj", "username": "usr", - "password": "pswd", "auth_url": "https://example.com"} - user3 = {"tenant_name": "proj", "username": "usr", - "password": "pswd", "auth_url": "https://example.com"} - - user_list = [user1, user2, user3] - - class AuthRef(object): - USER_ID_COUNT = 0 - PROJECT_ID_COUNT = 0 - - @property - def user_id(self): - self.USER_ID_COUNT += 1 - return "u%s" % self.USER_ID_COUNT - - @property - def project_id(self): - self.PROJECT_ID_COUNT += 1 - return "p%s" % (self.PROJECT_ID_COUNT % 2) - - auth_ref = AuthRef() - - mock_clients.return_value.keystone.auth_ref = auth_ref - - self.platforms["openstack"]["users"] = user_list - - user_generator = users.UserGenerator(self.context) - user_generator.setup() - - self.assertIn("users", self.context) - self.assertIn("tenants", self.context) - self.assertIn("user_choice_method", self.context) - self.assertEqual("random", self.context["user_choice_method"]) - - creds = mock_open_stack_credential.return_value - self.assertEqual( - [{"id": "u1", "credential": creds, "tenant_id": "p1"}, - {"id": "u2", "credential": creds, "tenant_id": "p0"}, - {"id": "u3", "credential": creds, "tenant_id": "p1"}], - self.context["users"] - ) - self.assertEqual({"p0": {"id": "p0", "name": creds.tenant_name}, - "p1": {"id": "p1", "name": creds.tenant_name}}, - self.context["tenants"]) - - -class UserGeneratorForNewUsersTestCase(test.ScenarioTestCase): - - tenants_num = 1 - users_per_tenant = 5 - users_num = tenants_num * users_per_tenant - threads = 10 - - def setUp(self): - super(UserGeneratorForNewUsersTestCase, self).setUp() - self.osclients_patcher = mock.patch("%s.osclients" % CTX) - self.osclients = self.osclients_patcher.start() - self.addCleanup(self.osclients_patcher.stop) - - # Force the case of creating new users 
- self.platforms = { - "openstack": { - "admin": {"username": "root", - "password": "qwerty", - "auth_url": "https://example.com"}, - "users": [] - } - } - - self.context.update({ - "config": { - "users": { - "tenants": self.tenants_num, - "users_per_tenant": self.users_per_tenant, - "resource_management_workers": self.threads, - } - }, - "env": {"platforms": self.platforms}, - "users": [], - "task": {"uuid": "task_id", "deployment_uuid": "dep_uuid"} - }) - - @mock.patch("%s.network.wrap" % CTX) - def test__remove_default_security_group_not_needed(self, mock_wrap): - services = {"compute": consts.Service.NOVA} - self.osclients.Clients().services.return_value = services - user_generator = users.UserGenerator(self.context) - user_generator._remove_default_security_group() - self.assertFalse(mock_wrap.called) - - @mock.patch("%s.network.wrap" % CTX) - def test__remove_default_security_group_neutron_no_sg(self, mock_wrap): - net_wrapper = mock.Mock(SERVICE_IMPL=consts.Service.NEUTRON) - net_wrapper.supports_extension.return_value = (False, None) - mock_wrap.return_value = net_wrapper - - user_generator = users.UserGenerator(self.context) - - admin_clients = mock.Mock() - admin_clients.services.return_value = { - "compute": consts.Service.NOVA, - "neutron": consts.Service.NEUTRON} - user_clients = [mock.Mock(), mock.Mock()] - self.osclients.Clients.side_effect = [admin_clients] + user_clients - - user_generator._remove_default_security_group() - - mock_wrap.assert_called_once_with(admin_clients, user_generator) - net_wrapper.supports_extension.assert_called_once_with( - "security-group") - - @mock.patch("rally.common.utils.iterate_per_tenants") - @mock.patch("%s.network" % CTX) - def test__remove_default_security_group(self, mock_network, - mock_iterate_per_tenants): - net_wrapper = mock.Mock(SERVICE_IMPL=consts.Service.NEUTRON) - net_wrapper.supports_extension.return_value = (True, None) - mock_network.wrap.return_value = net_wrapper - - user_generator = users.UserGenerator(self.context) - - admin_clients = mock.Mock() - admin_clients.services.return_value = { - "compute": consts.Service.NOVA, - "neutron": consts.Service.NEUTRON} - user1 = mock.Mock() - user1.neutron.return_value.list_security_groups.return_value = { - "security_groups": [{"id": "id-1", "name": "default"}, - {"id": "id-2", "name": "not-default"}]} - user2 = mock.Mock() - user2.neutron.return_value.list_security_groups.return_value = { - "security_groups": [{"id": "id-3", "name": "default"}, - {"id": "id-4", "name": "not-default"}]} - user_clients = [user1, user2] - self.osclients.Clients.side_effect = [admin_clients] + user_clients - - mock_iterate_per_tenants.return_value = [ - (mock.MagicMock(), "t1"), - (mock.MagicMock(), "t2")] - - user_generator._remove_default_security_group() - - mock_network.wrap.assert_called_once_with(admin_clients, - user_generator) - - mock_iterate_per_tenants.assert_called_once_with( - user_generator.context["users"]) - expected = [mock.call(user_generator.credential)] + [ - mock.call(u["credential"]) - for u, t in mock_iterate_per_tenants.return_value] - self.osclients.Clients.assert_has_calls(expected, any_order=True) - - user_net = user1.neutron.return_value - user_net.list_security_groups.assert_called_once_with(tenant_id="t1") - user_net = user2.neutron.return_value - user_net.list_security_groups.assert_called_once_with(tenant_id="t2") - admin_neutron = admin_clients.neutron.return_value - self.assertEqual( - [mock.call("id-1"), mock.call("id-3")], - 
admin_neutron.delete_security_group.call_args_list) - - @mock.patch("%s.identity" % CTX) - def test__create_tenants(self, mock_identity): - self.context["config"]["users"]["tenants"] = 1 - user_generator = users.UserGenerator(self.context) - tenants = user_generator._create_tenants() - self.assertEqual(1, len(tenants)) - id, tenant = tenants.popitem() - self.assertIn("name", tenant) - - @mock.patch("%s.identity" % CTX) - def test__create_users(self, mock_identity): - self.context["config"]["users"]["users_per_tenant"] = 2 - user_generator = users.UserGenerator(self.context) - user_generator.context["tenants"] = {"t1": {"id": "t1", "name": "t1"}, - "t2": {"id": "t2", "name": "t2"}} - users_ = user_generator._create_users() - self.assertEqual(4, len(users_)) - for user in users_: - self.assertIn("id", user) - self.assertIn("credential", user) - - @mock.patch("%s.identity" % CTX) - def test__delete_tenants(self, mock_identity): - user_generator = users.UserGenerator(self.context) - user_generator.context["tenants"] = {"t1": {"id": "t1", "name": "t1"}, - "t2": {"id": "t2", "name": "t2"}} - user_generator._delete_tenants() - self.assertEqual(0, len(user_generator.context["tenants"])) - - @mock.patch("%s.identity" % CTX) - def test__delete_tenants_failure(self, mock_identity): - identity_service = mock_identity.Identity.return_value - identity_service.delete_project.side_effect = Exception() - user_generator = users.UserGenerator(self.context) - user_generator.context["tenants"] = {"t1": {"id": "t1", "name": "t1"}, - "t2": {"id": "t2", "name": "t2"}} - user_generator._delete_tenants() - self.assertEqual(0, len(user_generator.context["tenants"])) - - @mock.patch("%s.identity" % CTX) - def test__delete_users(self, mock_identity): - user_generator = users.UserGenerator(self.context) - user1 = mock.MagicMock() - user2 = mock.MagicMock() - user_generator.context["users"] = [user1, user2] - user_generator._delete_users() - self.assertEqual(0, len(user_generator.context["users"])) - - @mock.patch("%s.identity" % CTX) - def test__delete_users_failure(self, mock_identity): - identity_service = mock_identity.Identity.return_value - identity_service.delete_user.side_effect = Exception() - user_generator = users.UserGenerator(self.context) - user1 = mock.MagicMock() - user2 = mock.MagicMock() - user_generator.context["users"] = [user1, user2] - user_generator._delete_users() - self.assertEqual(0, len(user_generator.context["users"])) - - @mock.patch("%s.identity" % CTX) - def test_setup_and_cleanup(self, mock_identity): - with users.UserGenerator(self.context) as ctx: - - ctx.setup() - - self.assertEqual(self.users_num, - len(ctx.context["users"])) - self.assertEqual(self.tenants_num, - len(ctx.context["tenants"])) - - self.assertEqual("random", ctx.context["user_choice_method"]) - - # Cleanup (called by content manager) - self.assertEqual(0, len(ctx.context["users"])) - self.assertEqual(0, len(ctx.context["tenants"])) - - @mock.patch("rally.common.broker.LOG.warning") - @mock.patch("%s.identity" % CTX) - def test_setup_and_cleanup_with_error_during_create_user( - self, mock_identity, mock_log_warning): - identity_service = mock_identity.Identity.return_value - identity_service.create_user.side_effect = Exception() - with users.UserGenerator(self.context) as ctx: - self.assertRaises(exceptions.ContextSetupFailure, ctx.setup) - mock_log_warning.assert_called_with( - "Failed to consume a task from the queue: ") - - # Ensure that tenants get deleted anyway - self.assertEqual(0, len(ctx.context["tenants"])) - - 
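The keystone UserGenerator tests above repeatedly enter the context with a "with" statement, call setup() explicitly, and then assert that users and tenants are gone after the block exits. The following is a minimal sketch of that setup()/cleanup() context-manager contract, not the actual rally.task.context implementation; the class name and the data it stores are illustrative only.

class SketchContext(object):
    """Toy context: cleanup() runs on exit even if the body raises."""

    def __init__(self, context):
        self.context = context

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.cleanup()

    def setup(self):
        # a real context would create tenants and users here
        self.context["tenants"] = {"t1": {"name": "t1"}}
        self.context["users"] = [{"id": "u1", "tenant_id": "t1"}]

    def cleanup(self):
        # and delete them here
        self.context["tenants"] = {}
        self.context["users"] = []


if __name__ == "__main__":
    ctx_data = {}
    with SketchContext(ctx_data) as ctx:
        ctx.setup()
        assert ctx_data["users"]
    # after the with-block, cleanup() has already emptied the context
    assert not ctx_data["users"] and not ctx_data["tenants"]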
@mock.patch("%s.identity" % CTX) - def test_users_and_tenants_in_context(self, mock_identity): - identity_service = mock_identity.Identity.return_value - - credential = oscredential.OpenStackCredential( - "foo_url", "foo", "foo_pass", - https_insecure=True, - https_cacert="cacert") - tmp_context = dict(self.context) - tmp_context["config"]["users"] = {"tenants": 1, - "users_per_tenant": 2, - "resource_management_workers": 1} - tmp_context["env"]["platforms"]["openstack"]["admin"] = credential - - credential_dict = credential.to_dict() - user_list = [mock.MagicMock(id="id_%d" % i) - for i in range(self.users_num)] - identity_service.create_user.side_effect = user_list - - with users.UserGenerator(tmp_context) as ctx: - ctx.generate_random_name = mock.Mock() - ctx.setup() - - create_tenant_calls = [] - for i, t in enumerate(ctx.context["tenants"]): - create_tenant_calls.append( - mock.call(ctx.generate_random_name.return_value, - ctx.config["project_domain"])) - - for user in ctx.context["users"]: - self.assertEqual(set(["id", "credential", "tenant_id"]), - set(user.keys())) - - user_credential_dict = user["credential"].to_dict() - - excluded_keys = ["auth_url", "username", "password", - "tenant_name", "region_name", - "project_domain_name", - "user_domain_name", "permission"] - for key in (set(credential_dict.keys()) - set(excluded_keys)): - self.assertEqual(credential_dict[key], - user_credential_dict[key], - "The key '%s' differs." % key) - - tenants_ids = [] - for t in ctx.context["tenants"].keys(): - tenants_ids.append(t) - - for (user, tenant_id, orig_user) in zip(ctx.context["users"], - tenants_ids, user_list): - self.assertEqual(orig_user.id, user["id"]) - self.assertEqual(tenant_id, user["tenant_id"]) - - @mock.patch("%s.identity" % CTX) - def test_users_contains_correct_endpoint_type(self, mock_identity): - credential = oscredential.OpenStackCredential( - "foo_url", "foo", "foo_pass", - endpoint_type=consts.EndpointType.INTERNAL) - config = { - "config": { - "users": { - "tenants": 1, - "users_per_tenant": 2, - "resource_management_workers": 1 - } - }, - "env": {"platforms": {"openstack": {"admin": credential, - "users": []}}}, - "task": {"uuid": "task_id", "deployment_uuid": "deployment_id"} - } - - user_generator = users.UserGenerator(config) - users_ = user_generator._create_users() - - for user in users_: - self.assertEqual("internal", user["credential"].endpoint_type) - - @mock.patch("%s.identity" % CTX) - def test_users_contains_default_endpoint_type(self, mock_identity): - credential = oscredential.OpenStackCredential( - "foo_url", "foo", "foo_pass") - config = { - "config": { - "users": { - "tenants": 1, - "users_per_tenant": 2, - "resource_management_workers": 1 - } - }, - "env": {"platforms": {"openstack": {"admin": credential, - "users": []}}}, - "task": {"uuid": "task_id", "deployment_uuid": "deployment_id"} - } - - user_generator = users.UserGenerator(config) - users_ = user_generator._create_users() - - for user in users_: - self.assertEqual("public", user["credential"].endpoint_type) diff --git a/tests/unit/plugins/openstack/context/magnum/__init__.py b/tests/unit/plugins/openstack/context/magnum/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/context/magnum/test_ca_certs.py b/tests/unit/plugins/openstack/context/magnum/test_ca_certs.py deleted file mode 100644 index 34e5e07c50..0000000000 --- a/tests/unit/plugins/openstack/context/magnum/test_ca_certs.py +++ /dev/null @@ -1,248 +0,0 @@ -# All Rights 
Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.plugins.openstack.context.magnum import ca_certs -from tests.unit import test - -CTX = "rally.plugins.openstack.context.magnum" -SCN = "rally.plugins.openstack.scenarios" - - -class CaCertsGeneratorTestCase(test.ScenarioTestCase): - - def _gen_tenants(self, count): - tenants = {} - for id_ in range(count): - tenants[str(id_)] = {"name": str(id_)} - tenants[str(id_)]["cluster"] = "rally_cluster_uuid" - return tenants - - def test__generate_csr_and_key(self): - - ca_cert_ctx = ca_certs.CaCertGenerator(self.context) - result = ca_cert_ctx._generate_csr_and_key() - - assert result["csr"] is not None - assert result["key"] is not None - - @mock.patch("%s.magnum.utils.MagnumScenario._create_ca_certificate" % SCN) - @mock.patch("%s.magnum.utils.MagnumScenario._get_ca_certificate" % SCN) - @mock.patch("%s.ca_certs.open" % CTX, side_effect=mock.mock_open(), - create=True) - @mock.patch("%s.ca_certs.CaCertGenerator._generate_csr_and_key" - % CTX) - @mock.patch("%s.magnum.utils.MagnumScenario._get_cluster_template" % SCN) - @mock.patch("%s.magnum.utils.MagnumScenario._get_cluster" % SCN, - return_value=mock.Mock()) - def test_setup(self, mock_magnum_scenario__get_cluster, - mock_magnum_scenario__get_cluster_template, - mock_ca_cert_generator__generate_csr_and_key, - mock_open, - mock_magnum_scenario__get_ca_certificate, - mock_magnum_scenario__create_ca_certificate): - tenants_count = 2 - users_per_tenant = 5 - - tenants = self._gen_tenants(tenants_count) - users = [] - for ten_id in tenants: - for i in range(users_per_tenant): - users.append({"id": i, "tenant_id": ten_id, - "credential": mock.MagicMock()}) - - self.context.update({ - "config": { - "users": { - "tenants": tenants_count, - "users_per_tenant": users_per_tenant, - "concurrent": 10, - }, - "clusters": { - "cluster_template_uuid": "123456789", - "node_count": 2 - }, - "ca_certs": { - "directory": "" - } - }, - "users": users, - "tenants": tenants - }) - - fake_ct = mock.Mock() - fake_ct.tls_disabled = False - mock_magnum_scenario__get_cluster_template.return_value = fake_ct - fake_tls = {"csr": "fake_csr", "key": "fake_key"} - mock_ca_cert_generator__generate_csr_and_key.return_value = fake_tls - fake_ca_cert = mock.Mock() - fake_ca_cert.pem = "fake_ca_cert" - mock_magnum_scenario__get_ca_certificate.return_value = fake_ca_cert - fake_cert = mock.Mock() - fake_cert.pem = "fake_cert" - mock_magnum_scenario__create_ca_certificate.return_value = fake_cert - - ca_cert_ctx = ca_certs.CaCertGenerator(self.context) - ca_cert_ctx.setup() - - mock_cluster = mock_magnum_scenario__get_cluster.return_value - mock_calls = [mock.call(mock_cluster.cluster_template_id) - for i in range(tenants_count)] - mock_magnum_scenario__get_cluster_template.assert_has_calls( - mock_calls) - mock_calls = [mock.call("rally_cluster_uuid") - for i in range(tenants_count)] - mock_magnum_scenario__get_cluster.assert_has_calls(mock_calls) - 
mock_magnum_scenario__get_ca_certificate.assert_has_calls(mock_calls) - fake_csr_req = {"cluster_uuid": "rally_cluster_uuid", - "csr": fake_tls["csr"]} - mock_calls = [mock.call(fake_csr_req) - for i in range(tenants_count)] - mock_magnum_scenario__create_ca_certificate.assert_has_calls( - mock_calls) - - @mock.patch("%s.magnum.utils.MagnumScenario._create_ca_certificate" % SCN) - @mock.patch("%s.magnum.utils.MagnumScenario._get_ca_certificate" % SCN) - @mock.patch("%s.magnum.utils.MagnumScenario._get_cluster_template" % SCN) - @mock.patch("%s.magnum.utils.MagnumScenario._get_cluster" % SCN, - return_value=mock.Mock()) - def test_tls_disabled_setup(self, mock_magnum_scenario__get_cluster, - mock_magnum_scenario__get_cluster_template, - mock_magnum_scenario__get_ca_certificate, - mock_magnum_scenario__create_ca_certificate): - tenants_count = 2 - users_per_tenant = 5 - - tenants = self._gen_tenants(tenants_count) - users = [] - for ten_id in tenants: - for i in range(users_per_tenant): - users.append({"id": i, "tenant_id": ten_id, - "credential": mock.MagicMock()}) - - self.context.update({ - "config": { - "users": { - "tenants": tenants_count, - "users_per_tenant": users_per_tenant, - "concurrent": 10, - }, - "clusters": { - "cluster_template_uuid": "123456789", - "node_count": 2 - }, - "ca_certs": { - "directory": "" - } - }, - "users": users, - "tenants": tenants - }) - - fake_ct = mock.Mock() - fake_ct.tls_disabled = True - mock_magnum_scenario__get_cluster_template.return_value = fake_ct - - ca_cert_ctx = ca_certs.CaCertGenerator(self.context) - ca_cert_ctx.setup() - - mock_cluster = mock_magnum_scenario__get_cluster.return_value - mock_calls = [mock.call(mock_cluster.cluster_template_id) - for i in range(tenants_count)] - mock_magnum_scenario__get_cluster_template.assert_has_calls( - mock_calls) - mock_calls = [mock.call("rally_cluster_uuid") - for i in range(tenants_count)] - mock_magnum_scenario__get_cluster.assert_has_calls(mock_calls) - mock_magnum_scenario__get_ca_certificate.assert_not_called() - mock_magnum_scenario__create_ca_certificate.assert_not_called() - - @mock.patch("os.remove", return_value=mock.Mock()) - @mock.patch("os.path.join", return_value=mock.Mock()) - @mock.patch("%s.magnum.utils.MagnumScenario._get_cluster_template" % SCN) - @mock.patch("%s.magnum.utils.MagnumScenario._get_cluster" % SCN, - return_value=mock.Mock()) - def test_cleanup(self, mock_magnum_scenario__get_cluster, - mock_magnum_scenario__get_cluster_template, - mock_os_path_join, mock_os_remove): - - tenants_count = 2 - users_per_tenant = 5 - - tenants = self._gen_tenants(tenants_count) - users = [] - for ten_id in tenants: - for i in range(users_per_tenant): - users.append({"id": i, "tenant_id": ten_id, - "credential": mock.MagicMock()}) - - self.context.update({ - "config": { - }, - "ca_certs_directory": "", - "users": users, - "tenants": tenants - }) - - fake_ct = mock.Mock() - fake_ct.tls_disabled = False - mock_magnum_scenario__get_cluster_template.return_value = fake_ct - - ca_cert_ctx = ca_certs.CaCertGenerator(self.context) - ca_cert_ctx.cleanup() - - cluster_uuid = "rally_cluster_uuid" - dir = self.context["ca_certs_directory"] - mock_os_path_join.assert_has_calls(dir, cluster_uuid.__add__(".key")) - mock_os_path_join.assert_has_calls( - dir, cluster_uuid.__add__("_ca.crt")) - mock_os_path_join.assert_has_calls(dir, cluster_uuid.__add__(".crt")) - - @mock.patch("os.remove", return_value=mock.Mock()) - @mock.patch("os.path.join", return_value=mock.Mock()) - 
@mock.patch("%s.magnum.utils.MagnumScenario._get_cluster_template" % SCN) - @mock.patch("%s.magnum.utils.MagnumScenario._get_cluster" % SCN, - return_value=mock.Mock()) - def test_tls_disabled_cleanup(self, mock_magnum_scenario__get_cluster, - mock_magnum_scenario__get_cluster_template, - mock_os_path_join, mock_os_remove): - - tenants_count = 2 - users_per_tenant = 5 - - tenants = self._gen_tenants(tenants_count) - users = [] - for ten_id in tenants: - for i in range(users_per_tenant): - users.append({"id": i, "tenant_id": ten_id, - "credential": mock.MagicMock()}) - - self.context.update({ - "config": { - }, - "ca_certs_directory": "", - "users": users, - "tenants": tenants - }) - - fake_ct = mock.Mock() - fake_ct.tls_disabled = True - mock_magnum_scenario__get_cluster_template.return_value = fake_ct - - ca_cert_ctx = ca_certs.CaCertGenerator(self.context) - ca_cert_ctx.cleanup() - - mock_os_path_join.assert_not_called() - mock_os_remove.assert_not_called() diff --git a/tests/unit/plugins/openstack/context/magnum/test_cluster_templates.py b/tests/unit/plugins/openstack/context/magnum/test_cluster_templates.py deleted file mode 100644 index 89e0bc3dff..0000000000 --- a/tests/unit/plugins/openstack/context/magnum/test_cluster_templates.py +++ /dev/null @@ -1,108 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.plugins.openstack.context.magnum import cluster_templates -from rally.plugins.openstack.scenarios.magnum import utils as magnum_utils -from tests.unit import fakes -from tests.unit import test - - -BASE_CTX = "rally.task.context" -CTX = "rally.plugins.openstack.context" -BASE_SCN = "rally.task.scenarios" -SCN = "rally.plugins.openstack.scenarios" - - -class ClusterTemplatesGeneratorTestCase(test.ScenarioTestCase): - - """Generate tenants.""" - def _gen_tenants(self, count): - tenants = {} - for id_ in range(count): - tenants[str(id_)] = dict(name=str(id_)) - return tenants - - @mock.patch("%s.magnum.utils.MagnumScenario." 
- "_create_cluster_template" % SCN, - return_value=fakes.FakeClusterTemplate(id="uuid")) - def test_setup(self, mock__create_cluster_template): - tenants_count = 2 - users_per_tenant = 5 - - tenants = self._gen_tenants(tenants_count) - users = [] - for ten_id in tenants: - for i in range(users_per_tenant): - users.append({"id": i, "tenant_id": ten_id, - "credential": mock.MagicMock()}) - - self.context.update({ - "config": { - "users": { - "tenants": tenants_count, - "users_per_tenant": users_per_tenant, - "concurrent": 10, - }, - "cluster_templates": { - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "docker_volume_size": 5, - "coe": "kubernetes", - "image_id": "fedora-atomic-latest", - "network_driver": "flannel" - } - }, - "users": users, - "tenants": tenants - }) - - ct_ctx = cluster_templates.ClusterTemplateGenerator(self.context) - ct_ctx.setup() - - ct_ctx_config = self.context["config"]["cluster_templates"] - image_id = ct_ctx_config.get("image_id") - external_network_id = ct_ctx_config.get( - "external_network_id") - dns_nameserver = ct_ctx_config.get("dns_nameserver") - flavor_id = ct_ctx_config.get("flavor_id") - docker_volume_size = ct_ctx_config.get("docker_volume_size") - network_driver = ct_ctx_config.get("network_driver") - coe = ct_ctx_config.get("coe") - mock_calls = [mock.call(image_id=image_id, - external_network_id=external_network_id, - dns_nameserver=dns_nameserver, - flavor_id=flavor_id, - docker_volume_size=docker_volume_size, - network_driver=network_driver, coe=coe) - for i in range(tenants_count)] - mock__create_cluster_template.assert_has_calls(mock_calls) - - # check that stack ids have been saved in context - for ten_id in self.context["tenants"].keys(): - self.assertIsNotNone( - self.context["tenants"][ten_id]["cluster_template"]) - - @mock.patch("%s.magnum.cluster_templates.resource_manager.cleanup" % CTX) - def test_cleanup(self, mock_cleanup): - self.context.update({ - "users": mock.MagicMock() - }) - ct_ctx = cluster_templates.ClusterTemplateGenerator(self.context) - ct_ctx.cleanup() - mock_cleanup.assert_called_once_with( - names=["magnum.cluster_templates"], - users=self.context["users"], - superclass=magnum_utils.MagnumScenario, - task_id=self.context["owner_id"]) diff --git a/tests/unit/plugins/openstack/context/magnum/test_clusters.py b/tests/unit/plugins/openstack/context/magnum/test_clusters.py deleted file mode 100644 index cd927bf18a..0000000000 --- a/tests/unit/plugins/openstack/context/magnum/test_clusters.py +++ /dev/null @@ -1,158 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -import copy - -import mock - -from rally.plugins.openstack.context.magnum import clusters -from rally.plugins.openstack.scenarios.magnum import utils as magnum_utils -from tests.unit import test - -CTX = "rally.plugins.openstack.context.magnum" -SCN = "rally.plugins.openstack.scenarios" - - -class ClustersGeneratorTestCase(test.ScenarioTestCase): - - def _gen_tenants(self, count): - tenants = {} - for id_ in range(count): - tenants[str(id_)] = {"name": str(id_)} - return tenants - - def _gen_tenants_with_cluster_template(self, count): - tenants = {} - for id_ in range(count): - tenants[str(id_)] = {"name": str(id_)} - tenants[str(id_)]["cluster_template"] = "rally_ct_uuid" - return tenants - - @mock.patch("%s.magnum.utils.MagnumScenario._create_cluster" % SCN, - return_value=mock.Mock()) - @mock.patch("%s.nova.utils.NovaScenario._create_keypair" % SCN, - return_value="key1") - def test_setup_using_existing_cluster_template( - self, mock__create_keypair, mock__create_cluster): - tenants_count = 2 - users_per_tenant = 5 - - tenants = self._gen_tenants(tenants_count) - users = [] - for ten_id in tenants: - for i in range(users_per_tenant): - users.append({"id": i, "tenant_id": ten_id, - "credential": mock.MagicMock()}) - - self.context.update({ - "config": { - "users": { - "tenants": tenants_count, - "users_per_tenant": users_per_tenant, - "concurrent": 10, - }, - "clusters": { - "cluster_template_uuid": "123456789", - "node_count": 2 - } - }, - "users": users, - "tenants": tenants - }) - - mock_cluster = mock__create_cluster.return_value - new_context = copy.deepcopy(self.context) - for id_ in new_context["tenants"]: - new_context["tenants"][id_]["cluster"] = mock_cluster.uuid - - cluster_ctx = clusters.ClusterGenerator(self.context) - cluster_ctx.setup() - - self.assertEqual(new_context, self.context) - cluster_ctx_config = self.context["config"]["clusters"] - node_count = cluster_ctx_config.get("node_count") - cluster_template_uuid = cluster_ctx_config.get("cluster_template_uuid") - mock_calls = [mock.call(cluster_template=cluster_template_uuid, - keypair="key1", node_count=node_count) - for i in range(tenants_count)] - mock__create_cluster.assert_has_calls(mock_calls) - - @mock.patch("%s.magnum.utils.MagnumScenario._create_cluster" % SCN, - return_value=mock.Mock()) - @mock.patch("%s.nova.utils.NovaScenario._create_keypair" % SCN, - return_value="key1") - def test_setup(self, mock__create_keypair, mock__create_cluster): - tenants_count = 2 - users_per_tenant = 5 - - tenants = self._gen_tenants_with_cluster_template(tenants_count) - users = [] - for ten_id in tenants: - for i in range(users_per_tenant): - users.append({"id": i, "tenant_id": ten_id, - "credential": mock.MagicMock()}) - - self.context.update({ - "config": { - "users": { - "tenants": tenants_count, - "users_per_tenant": users_per_tenant, - "concurrent": 10, - }, - "cluster_templates": { - "dns_nameserver": "8.8.8.8", - "external_network_id": "public", - "flavor_id": "m1.small", - "docker_volume_size": 5, - "coe": "kubernetes", - "image_id": "fedora-atomic-latest", - "network_driver": "flannel" - }, - "clusters": { - "node_count": 2 - } - }, - "users": users, - "tenants": tenants - }) - - mock_cluster = mock__create_cluster.return_value - new_context = copy.deepcopy(self.context) - for id_ in new_context["tenants"]: - new_context["tenants"][id_]["cluster"] = mock_cluster.uuid - - cluster_ctx = clusters.ClusterGenerator(self.context) - cluster_ctx.setup() - - self.assertEqual(new_context, self.context) - cluster_ctx_config 
= self.context["config"]["clusters"] - node_count = cluster_ctx_config.get("node_count") - mock_calls = [mock.call(cluster_template="rally_ct_uuid", - keypair="key1", node_count=node_count) - for i in range(tenants_count)] - mock__create_cluster.assert_has_calls(mock_calls) - - @mock.patch("%s.cluster_templates.resource_manager.cleanup" % CTX) - def test_cleanup(self, mock_cleanup): - self.context.update({ - "users": mock.MagicMock() - }) - clusters_ctx = clusters.ClusterGenerator(self.context) - clusters_ctx.cleanup() - mock_cleanup.assert_called_once_with( - names=["magnum.clusters", "nova.keypairs"], - users=self.context["users"], - superclass=magnum_utils.MagnumScenario, - task_id=self.context["owner_id"]) diff --git a/tests/unit/plugins/openstack/context/manila/__init__.py b/tests/unit/plugins/openstack/context/manila/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/context/manila/test_manila_security_services.py b/tests/unit/plugins/openstack/context/manila/test_manila_security_services.py deleted file mode 100644 index fed16d4d1b..0000000000 --- a/tests/unit/plugins/openstack/context/manila/test_manila_security_services.py +++ /dev/null @@ -1,171 +0,0 @@ -# Copyright 2015 Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ddt -import mock -import six - -from rally.plugins.openstack.context.manila import consts -from rally.plugins.openstack.context.manila import manila_security_services -from rally.plugins.openstack.scenarios.manila import utils as manila_utils -from tests.unit import test - -CONTEXT_NAME = consts.SECURITY_SERVICES_CONTEXT_NAME - - -@ddt.ddt -class SecurityServicesTestCase(test.ScenarioTestCase): - TENANTS_AMOUNT = 3 - USERS_PER_TENANT = 4 - SECURITY_SERVICES = [ - {"security_service_type": ss_type, - "dns_ip": "fake_dns_ip_%s" % ss_type, - "server": "fake_server_%s" % ss_type, - "domain": "fake_domain_%s" % ss_type, - "user": "fake_user_%s" % ss_type, - "password": "fake_password_%s" % ss_type} - for ss_type in ("ldap", "kerberos", "active_directory") - ] - - def _get_context(self, security_services=None, networks_per_tenant=2, - neutron_network_provider=True): - if security_services is None: - security_services = self.SECURITY_SERVICES - tenants = {} - for t_id in range(self.TENANTS_AMOUNT): - tenants[six.text_type(t_id)] = {"name": six.text_type(t_id)} - tenants[six.text_type(t_id)]["networks"] = [] - for i in range(networks_per_tenant): - network = {"id": "fake_net_id_%s" % i} - if neutron_network_provider: - network["subnets"] = ["fake_subnet_id_of_net_%s" % i] - else: - network["cidr"] = "101.0.5.0/24" - tenants[six.text_type(t_id)]["networks"].append(network) - users = [] - for t_id in tenants.keys(): - for i in range(self.USERS_PER_TENANT): - users.append({"id": i, "tenant_id": t_id, "endpoint": "fake"}) - context = { - "config": { - "users": { - "tenants": self.TENANTS_AMOUNT, - "users_per_tenant": self.USERS_PER_TENANT, - }, - CONTEXT_NAME: { - "security_services": security_services, - }, - }, - "admin": { - "endpoint": mock.MagicMock(), - }, - "task": mock.MagicMock(), - "owner_id": "foo_uuid", - "users": users, - "tenants": tenants, - } - return context - - def test_init(self): - context = { - "task": mock.MagicMock(), - "config": { - CONTEXT_NAME: {"foo": "bar"}, - "not_manila": {"not_manila_key": "not_manila_value"}, - } - } - - inst = manila_security_services.SecurityServices(context) - - self.assertEqual("bar", inst.config.get("foo")) - self.assertFalse(inst.config.get("security_services")) - self.assertEqual(445, inst.get_order()) - self.assertEqual(CONTEXT_NAME, inst.get_name()) - - @mock.patch.object(manila_security_services.manila_utils, "ManilaScenario") - @ddt.data(True, False) - def test_setup_security_services_set(self, neutron_network_provider, - mock_manila_scenario): - ctxt = self._get_context( - neutron_network_provider=neutron_network_provider) - inst = manila_security_services.SecurityServices(ctxt) - - inst.setup() - - self.assertEqual( - self.TENANTS_AMOUNT, mock_manila_scenario.call_count) - self.assertEqual( - mock_manila_scenario.call_args_list, - [mock.call({ - "task": inst.task, - "owner_id": "foo_uuid", - "config": {"api_versions": []}, - "user": user}) - for user in inst.context["users"] if user["id"] == 0] - ) - mock_create_security_service = ( - mock_manila_scenario.return_value._create_security_service) - expected_calls = [] - for ss in self.SECURITY_SERVICES: - expected_calls.extend([mock.call(**ss), mock.call().to_dict()]) - mock_create_security_service.assert_has_calls(expected_calls) - self.assertEqual( - self.TENANTS_AMOUNT * len(self.SECURITY_SERVICES), - mock_create_security_service.call_count) - self.assertEqual( - self.TENANTS_AMOUNT, - len(inst.context["config"][CONTEXT_NAME]["security_services"])) - for tenant in 
inst.context["tenants"]: - self.assertEqual( - self.TENANTS_AMOUNT, - len(inst.context["tenants"][tenant][CONTEXT_NAME][ - "security_services"]) - ) - - @mock.patch.object(manila_security_services.manila_utils, "ManilaScenario") - def test_setup_security_services_not_set(self, mock_manila_scenario): - ctxt = self._get_context(security_services=[]) - inst = manila_security_services.SecurityServices(ctxt) - - inst.setup() - - self.assertFalse(mock_manila_scenario.called) - self.assertFalse( - mock_manila_scenario.return_value._create_security_service.called) - self.assertIn(CONTEXT_NAME, inst.context["config"]) - self.assertIn( - "security_services", inst.context["config"][CONTEXT_NAME]) - self.assertEqual( - 0, - len(inst.context["config"][CONTEXT_NAME]["security_services"])) - for tenant in inst.context["tenants"]: - self.assertEqual( - 0, - len(inst.context["tenants"][tenant][CONTEXT_NAME][ - "security_services"]) - ) - - @mock.patch.object(manila_security_services, "resource_manager") - def test_cleanup_security_services_enabled(self, mock_resource_manager): - ctxt = self._get_context() - inst = manila_security_services.SecurityServices(ctxt) - - inst.cleanup() - - mock_resource_manager.cleanup.assert_called_once_with( - names=["manila.security_services"], - users=ctxt["users"], - superclass=manila_utils.ManilaScenario, - task_id="foo_uuid") diff --git a/tests/unit/plugins/openstack/context/manila/test_manila_share_networks.py b/tests/unit/plugins/openstack/context/manila/test_manila_share_networks.py deleted file mode 100644 index ec5423fefa..0000000000 --- a/tests/unit/plugins/openstack/context/manila/test_manila_share_networks.py +++ /dev/null @@ -1,403 +0,0 @@ -# Copyright 2015 Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import copy - -import ddt -import mock -import six - -from rally import exceptions -from rally.plugins.openstack.context.manila import consts -from rally.plugins.openstack.context.manila import manila_share_networks -from tests.unit import test - -MANILA_UTILS_PATH = ( - "rally.plugins.openstack.scenarios.manila.utils.ManilaScenario.") - -MOCK_USER_CREDENTIAL = mock.MagicMock() - - -class Fake(object): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - setattr(self, k, v) - - def __getitem__(self, item): - return getattr(self, item) - - def to_dict(self): - return self.__dict__ - - -@ddt.ddt -class ShareNetworksTestCase(test.TestCase): - TENANTS_AMOUNT = 3 - USERS_PER_TENANT = 4 - SECURITY_SERVICES = [ - {"type": ss_type, - "dns_ip": "fake_dns_ip_%s" % ss_type, - "server": "fake_server_%s" % ss_type, - "domain": "fake_domain_%s" % ss_type, - "user": "fake_user_%s" % ss_type, - "password": "fake_password_%s" % ss_type, - "name": "fake_optional_name_%s" % ss_type} - for ss_type in ("ldap", "kerberos", "active_directory") - ] - - def _get_context(self, use_security_services=False, networks_per_tenant=2, - neutron_network_provider=True): - tenants = {} - for t_id in range(self.TENANTS_AMOUNT): - tenants[six.text_type(t_id)] = {"name": six.text_type(t_id)} - tenants[six.text_type(t_id)]["networks"] = [] - for i in range(networks_per_tenant): - network = {"id": "fake_net_id_%s" % i} - if neutron_network_provider: - network["subnets"] = ["fake_subnet_id_of_net_%s" % i] - else: - network["cidr"] = "101.0.5.0/24" - tenants[six.text_type(t_id)]["networks"].append(network) - users = [] - for t_id in tenants.keys(): - for i in range(self.USERS_PER_TENANT): - users.append({ - "id": i, "tenant_id": t_id, - "credential": MOCK_USER_CREDENTIAL}) - context = { - "config": { - "users": { - "tenants": self.TENANTS_AMOUNT, - "users_per_tenant": self.USERS_PER_TENANT, - "random_user_choice": False, - }, - consts.SHARE_NETWORKS_CONTEXT_NAME: { - "use_share_networks": True, - "share_networks": [], - }, - consts.SECURITY_SERVICES_CONTEXT_NAME: { - "security_services": ( - self.SECURITY_SERVICES - if use_security_services else []) - }, - "network": { - "networks_per_tenant": networks_per_tenant, - "start_cidr": "101.0.5.0/24", - }, - }, - "admin": { - "credential": mock.MagicMock(), - }, - "task": mock.MagicMock(), - "users": users, - "tenants": tenants, - "user_choice_method": "random", - } - return context - - def setUp(self): - super(self.__class__, self).setUp() - self.ctxt_use_existing = { - "task": mock.MagicMock(), - "config": { - "existing_users": {"foo": "bar"}, - consts.SHARE_NETWORKS_CONTEXT_NAME: { - "use_share_networks": True, - "share_networks": { - "tenant_1_id": ["sn_1_id", "sn_2_name"], - "tenant_2_name": ["sn_3_id", "sn_4_name", "sn_5_id"], - }, - }, - }, - "tenants": { - "tenant_1_id": {"id": "tenant_1_id", "name": "tenant_1_name"}, - "tenant_2_id": {"id": "tenant_2_id", "name": "tenant_2_name"}, - }, - "users": [ - {"tenant_id": "tenant_1_id", "credential": mock.MagicMock()}, - {"tenant_id": "tenant_2_id", "credential": mock.MagicMock()}, - ], - } - self.existing_sns = [ - Fake(id="sn_%s_id" % i, name="sn_%s_name" % i) for i in range(1, 6) - ] - - def test_init(self): - context = { - "task": mock.MagicMock(), - "config": { - consts.SHARE_NETWORKS_CONTEXT_NAME: {"foo": "bar"}, - "not_manila": {"not_manila_key": "not_manila_value"}, - }, - } - - inst = manila_share_networks.ShareNetworks(context) - - self.assertEqual( - {"foo": "bar", "share_networks": {}, "use_share_networks": 
False}, - inst.config) - - def test_setup_share_networks_disabled(self): - ctxt = { - "task": mock.MagicMock(), - "config": { - consts.SHARE_NETWORKS_CONTEXT_NAME: { - "use_share_networks": False, - }, - }, - consts.SHARE_NETWORKS_CONTEXT_NAME: {}, - } - inst = manila_share_networks.ShareNetworks(ctxt) - - expected_ctxt = copy.deepcopy(inst.context) - - inst.setup() - - self.assertEqual(expected_ctxt, inst.context) - - @mock.patch("rally.plugins.openstack.osclients.Clients") - @mock.patch(MANILA_UTILS_PATH + "_list_share_networks") - def test_setup_use_existing_share_networks( - self, mock_manila_scenario__list_share_networks, mock_clients): - existing_sns = self.existing_sns - expected_ctxt = copy.deepcopy(self.ctxt_use_existing) - inst = manila_share_networks.ShareNetworks(self.ctxt_use_existing) - mock_manila_scenario__list_share_networks.return_value = ( - self.existing_sns) - expected_ctxt.update({ - "delete_share_networks": False, - "tenants": { - "tenant_1_id": { - "id": "tenant_1_id", - "name": "tenant_1_name", - consts.SHARE_NETWORKS_CONTEXT_NAME: { - "share_networks": [ - sn.to_dict() for sn in existing_sns[0:2]], - }, - }, - "tenant_2_id": { - "id": "tenant_2_id", - "name": "tenant_2_name", - consts.SHARE_NETWORKS_CONTEXT_NAME: { - "share_networks": [ - sn.to_dict() for sn in existing_sns[2:5]], - }, - }, - } - }) - - inst.setup() - - self.assertEqual(expected_ctxt["task"], inst.context.get("task")) - self.assertEqual(expected_ctxt["config"], inst.context.get("config")) - self.assertEqual(expected_ctxt["users"], inst.context.get("users")) - self.assertFalse( - inst.context.get(consts.SHARE_NETWORKS_CONTEXT_NAME, {}).get( - "delete_share_networks")) - self.assertEqual(expected_ctxt["tenants"], inst.context.get("tenants")) - - def test_setup_use_existing_share_networks_tenant_not_found(self): - ctxt = copy.deepcopy(self.ctxt_use_existing) - ctxt.update({"tenants": {}}) - inst = manila_share_networks.ShareNetworks(ctxt) - - self.assertRaises(exceptions.ContextSetupFailure, inst.setup) - - @mock.patch("rally.plugins.openstack.osclients.Clients") - @mock.patch(MANILA_UTILS_PATH + "_list_share_networks") - def test_setup_use_existing_share_networks_sn_not_found( - self, mock_manila_scenario__list_share_networks, mock_clients): - ctxt = copy.deepcopy(self.ctxt_use_existing) - ctxt["config"][consts.SHARE_NETWORKS_CONTEXT_NAME][ - "share_networks"] = {"tenant_1_id": ["foo"]} - inst = manila_share_networks.ShareNetworks(ctxt) - mock_manila_scenario__list_share_networks.return_value = ( - self.existing_sns) - - self.assertRaises(exceptions.ContextSetupFailure, inst.setup) - - def test_setup_use_existing_share_networks_with_empty_list(self): - ctxt = copy.deepcopy(self.ctxt_use_existing) - ctxt["config"][consts.SHARE_NETWORKS_CONTEXT_NAME][ - "share_networks"] = {} - inst = manila_share_networks.ShareNetworks(ctxt) - - self.assertRaises(exceptions.ContextSetupFailure, inst.setup) - - @ddt.data(True, False) - @mock.patch("rally.plugins.openstack.osclients.Clients") - @mock.patch(MANILA_UTILS_PATH + "_create_share_network") - @mock.patch(MANILA_UTILS_PATH + "_add_security_service_to_share_network") - def test_setup_autocreate_share_networks_with_security_services( - self, - neutron, - mock_manila_scenario__add_security_service_to_share_network, - mock_manila_scenario__create_share_network, - mock_clients): - networks_per_tenant = 2 - ctxt = self._get_context( - networks_per_tenant=networks_per_tenant, - neutron_network_provider=neutron, - use_security_services=True, - ) - inst = 
manila_share_networks.ShareNetworks(ctxt) - for tenant_id in list(ctxt["tenants"].keys()): - inst.context["tenants"][tenant_id][ - consts.SECURITY_SERVICES_CONTEXT_NAME] = { - "security_services": [ - Fake(id="fake_id").to_dict() for i in (1, 2, 3) - ] - } - - inst.setup() - - self.assertEqual(ctxt["task"], inst.context.get("task")) - self.assertEqual(ctxt["config"], inst.context.get("config")) - self.assertEqual(ctxt["users"], inst.context.get("users")) - self.assertEqual(ctxt["tenants"], inst.context.get("tenants")) - mock_add_security_service_to_share_network = ( - mock_manila_scenario__add_security_service_to_share_network) - mock_add_security_service_to_share_network.assert_has_calls([ - mock.call(mock.ANY, mock.ANY) - for i in range( - self.TENANTS_AMOUNT * - networks_per_tenant * - len(self.SECURITY_SERVICES))]) - if neutron: - sn_args = { - "neutron_net_id": mock.ANY, - "neutron_subnet_id": mock.ANY, - } - else: - sn_args = {"nova_net_id": mock.ANY} - expected_calls = [ - mock.call(**sn_args), - mock.call().to_dict(), - mock.ANY, - mock.ANY, - mock.ANY, - ] - mock_manila_scenario__create_share_network.assert_has_calls( - expected_calls * (self.TENANTS_AMOUNT * networks_per_tenant)) - mock_clients.assert_has_calls([mock.call(MOCK_USER_CREDENTIAL, {}) - for i in range(self.TENANTS_AMOUNT)]) - - @ddt.data(True, False) - @mock.patch("rally.plugins.openstack.osclients.Clients") - @mock.patch(MANILA_UTILS_PATH + "_create_share_network") - @mock.patch(MANILA_UTILS_PATH + "_add_security_service_to_share_network") - def test_setup_autocreate_share_networks_wo_security_services( - self, - neutron, - mock_manila_scenario__add_security_service_to_share_network, - mock_manila_scenario__create_share_network, - mock_clients): - networks_per_tenant = 2 - ctxt = self._get_context( - networks_per_tenant=networks_per_tenant, - neutron_network_provider=neutron, - ) - inst = manila_share_networks.ShareNetworks(ctxt) - - inst.setup() - - self.assertEqual(ctxt["task"], inst.context.get("task")) - self.assertEqual(ctxt["config"], inst.context.get("config")) - self.assertEqual(ctxt["users"], inst.context.get("users")) - self.assertEqual(ctxt["tenants"], inst.context.get("tenants")) - self.assertFalse( - mock_manila_scenario__add_security_service_to_share_network.called) - if neutron: - sn_args = { - "neutron_net_id": mock.ANY, - "neutron_subnet_id": mock.ANY, - } - else: - sn_args = {"nova_net_id": mock.ANY} - expected_calls = [mock.call(**sn_args), mock.call().to_dict()] - mock_manila_scenario__create_share_network.assert_has_calls( - expected_calls * (self.TENANTS_AMOUNT * networks_per_tenant)) - mock_clients.assert_has_calls([mock.call(MOCK_USER_CREDENTIAL, {}) - for i in range(self.TENANTS_AMOUNT)]) - - @mock.patch("rally.plugins.openstack.osclients.Clients") - @mock.patch(MANILA_UTILS_PATH + "_create_share_network") - @mock.patch(MANILA_UTILS_PATH + "_add_security_service_to_share_network") - def test_setup_autocreate_share_networks_wo_networks( - self, - mock_manila_scenario__add_security_service_to_share_network, - mock_manila_scenario__create_share_network, - mock_clients): - ctxt = self._get_context(networks_per_tenant=0) - inst = manila_share_networks.ShareNetworks(ctxt) - - inst.setup() - - self.assertEqual(ctxt["task"], inst.context.get("task")) - self.assertEqual(ctxt["config"], inst.context.get("config")) - self.assertEqual(ctxt["users"], inst.context.get("users")) - self.assertEqual(ctxt["tenants"], inst.context.get("tenants")) - self.assertFalse( - 
mock_manila_scenario__add_security_service_to_share_network.called) - expected_calls = [mock.call(), mock.call().to_dict()] - mock_manila_scenario__create_share_network.assert_has_calls( - expected_calls * self.TENANTS_AMOUNT) - mock_clients.assert_has_calls([mock.call(MOCK_USER_CREDENTIAL, {}) - for i in range(self.TENANTS_AMOUNT)]) - - @mock.patch("rally.plugins.openstack.osclients.Clients") - @mock.patch(MANILA_UTILS_PATH + "_delete_share_network") - @mock.patch(MANILA_UTILS_PATH + "_list_share_servers") - @mock.patch(MANILA_UTILS_PATH + "_list_share_networks") - def test_cleanup_used_existing_share_networks( - self, - mock_manila_scenario__list_share_networks, - mock_manila_scenario__list_share_servers, - mock_manila_scenario__delete_share_network, - mock_clients): - inst = manila_share_networks.ShareNetworks(self.ctxt_use_existing) - mock_manila_scenario__list_share_networks.return_value = ( - self.existing_sns) - inst.setup() - - inst.cleanup() - - self.assertFalse(mock_manila_scenario__list_share_servers.called) - self.assertFalse(mock_manila_scenario__delete_share_network.called) - self.assertEqual(2, mock_clients.call_count) - for user in self.ctxt_use_existing["users"]: - self.assertIn(mock.call(user["credential"], {}), - mock_clients.mock_calls) - - @mock.patch("rally.plugins.openstack.context.manila.manila_share_networks." - "resource_manager.cleanup") - def test_cleanup_autocreated_share_networks(self, mock_cleanup): - task_id = "task" - ctxt = { - "config": {"manila_share_networks": { - "use_share_networks": True}}, - "users": [mock.Mock()], - "task": {"uuid": task_id}} - - inst = manila_share_networks.ShareNetworks(ctxt) - - inst.cleanup() - - mock_cleanup.assert_called_once_with( - names=["manila.share_networks"], - users=ctxt["users"], - superclass=manila_share_networks.ShareNetworks, - api_versions=None, - task_id=task_id) diff --git a/tests/unit/plugins/openstack/context/manila/test_manila_shares.py b/tests/unit/plugins/openstack/context/manila/test_manila_shares.py deleted file mode 100644 index 24afa2f25f..0000000000 --- a/tests/unit/plugins/openstack/context/manila/test_manila_shares.py +++ /dev/null @@ -1,206 +0,0 @@ -# Copyright 2016 Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import copy - -import ddt -import mock -import six - -from rally import consts as rally_consts -from rally.plugins.openstack.context.manila import consts -from rally.plugins.openstack.context.manila import manila_shares -from rally.plugins.openstack.scenarios.manila import utils as manila_utils -from tests.unit import test - -MANILA_UTILS_PATH = ( - "rally.plugins.openstack.scenarios.manila.utils.ManilaScenario.") - - -class Fake(object): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - setattr(self, k, v) - - def __getitem__(self, item): - return getattr(self, item) - - def to_dict(self): - return self.__dict__ - - -@ddt.ddt -class SharesTestCase(test.TestCase): - TENANTS_AMOUNT = 3 - USERS_PER_TENANT = 4 - SHARES_PER_TENANT = 7 - SHARE_NETWORKS = [{"id": "sn_%s_id" % d} for d in range(3)] - - def _get_context(self, use_share_networks=False, shares_per_tenant=None, - share_size=1, share_proto="fake_proto", share_type=None): - tenants = {} - for t_id in range(self.TENANTS_AMOUNT): - tenants[six.text_type(t_id)] = {"name": six.text_type(t_id)} - users = [] - for t_id in sorted(list(tenants.keys())): - for i in range(self.USERS_PER_TENANT): - users.append({ - "id": i, "tenant_id": t_id, - "credential": mock.MagicMock()}) - context = { - "config": { - "users": { - "tenants": self.TENANTS_AMOUNT, - "users_per_tenant": self.USERS_PER_TENANT, - "user_choice_method": "round_robin", - }, - consts.SHARE_NETWORKS_CONTEXT_NAME: { - "use_share_networks": use_share_networks, - "share_networks": self.SHARE_NETWORKS, - }, - consts.SHARES_CONTEXT_NAME: { - "shares_per_tenant": ( - shares_per_tenant or self.SHARES_PER_TENANT), - "size": share_size, - "share_proto": share_proto, - "share_type": share_type, - }, - }, - "admin": { - "credential": mock.MagicMock(), - }, - "task": mock.MagicMock(), - "owner_id": "foo_uuid", - "users": users, - "tenants": tenants, - } - if use_share_networks: - for t in context["tenants"].keys(): - context["tenants"][t][consts.SHARE_NETWORKS_CONTEXT_NAME] = { - "share_networks": self.SHARE_NETWORKS, - } - return context - - def test_init(self): - ctxt = { - "task": mock.MagicMock(), - "config": { - consts.SHARES_CONTEXT_NAME: {"foo": "bar"}, - "fake": {"fake_key": "fake_value"}, - }, - } - - inst = manila_shares.Shares(ctxt) - - self.assertEqual( - {"foo": "bar", "shares_per_tenant": 1, "size": 1, - "share_proto": "NFS", "share_type": None}, - inst.config) - self.assertIn( - rally_consts.JSON_SCHEMA, inst.CONFIG_SCHEMA.get("$schema")) - self.assertFalse(inst.CONFIG_SCHEMA.get("additionalProperties")) - self.assertEqual("object", inst.CONFIG_SCHEMA.get("type")) - props = inst.CONFIG_SCHEMA.get("properties", {}) - self.assertEqual( - {"minimum": 1, "type": "integer"}, props.get("shares_per_tenant")) - self.assertEqual({"minimum": 1, "type": "integer"}, props.get("size")) - self.assertEqual({"type": "string"}, props.get("share_proto")) - self.assertEqual({"type": "string"}, props.get("share_type")) - self.assertEqual(455, inst.get_order()) - self.assertEqual(consts.SHARES_CONTEXT_NAME, inst.get_name()) - - @mock.patch(MANILA_UTILS_PATH + "_create_share") - @ddt.data(True, False) - def test_setup( - self, - use_share_networks, - mock_manila_scenario__create_share): - share_type = "fake_share_type" - ctxt = self._get_context( - use_share_networks=use_share_networks, share_type=share_type) - inst = manila_shares.Shares(ctxt) - shares = [ - Fake(id="fake_share_id_%d" % s_id) - for s_id in range(self.TENANTS_AMOUNT * self.SHARES_PER_TENANT) - ] - 
mock_manila_scenario__create_share.side_effect = shares - expected_ctxt = copy.deepcopy(ctxt) - - inst.setup() - - self.assertEqual( - self.TENANTS_AMOUNT * self.SHARES_PER_TENANT, - mock_manila_scenario__create_share.call_count) - for d in range(self.TENANTS_AMOUNT): - self.assertEqual( - [ - s.to_dict() for s in shares[ - (d * self.SHARES_PER_TENANT):( - d * self.SHARES_PER_TENANT + self.SHARES_PER_TENANT - ) - ] - ], - inst.context.get("tenants", {}).get("%s" % d, {}).get("shares") - ) - self.assertEqual(expected_ctxt["task"], inst.context.get("task")) - self.assertEqual(expected_ctxt["config"], inst.context.get("config")) - self.assertEqual(expected_ctxt["users"], inst.context.get("users")) - if use_share_networks: - mock_calls = [ - mock.call( - share_proto=ctxt["config"][consts.SHARES_CONTEXT_NAME][ - "share_proto"], - size=ctxt["config"][consts.SHARES_CONTEXT_NAME]["size"], - share_type=ctxt["config"][consts.SHARES_CONTEXT_NAME][ - "share_type"], - share_network=self.SHARE_NETWORKS[ - int(t_id) % len(self.SHARE_NETWORKS)]["id"] - ) for t_id in expected_ctxt["tenants"].keys() - ] - else: - mock_calls = [ - mock.call( - share_proto=ctxt["config"][consts.SHARES_CONTEXT_NAME][ - "share_proto"], - size=ctxt["config"][consts.SHARES_CONTEXT_NAME]["size"], - share_type=ctxt["config"][consts.SHARES_CONTEXT_NAME][ - "share_type"], - ) for t_id in expected_ctxt["tenants"].keys() - ] - mock_manila_scenario__create_share.assert_has_calls( - mock_calls, any_order=True) - - @mock.patch(MANILA_UTILS_PATH + "_create_share") - @mock.patch("rally.plugins.openstack.cleanup.manager.cleanup") - def test_cleanup( - self, - mock_cleanup_manager_cleanup, - mock_manila_scenario__create_share): - ctxt = self._get_context() - inst = manila_shares.Shares(ctxt) - shares = [ - Fake(id="fake_share_id_%d" % s_id) - for s_id in range(self.TENANTS_AMOUNT * self.SHARES_PER_TENANT) - ] - mock_manila_scenario__create_share.side_effect = shares - inst.setup() - - inst.cleanup() - - mock_cleanup_manager_cleanup.assert_called_once_with( - names=["manila.shares"], - users=inst.context.get("users", []), - superclass=manila_utils.ManilaScenario, - task_id="foo_uuid") diff --git a/tests/unit/plugins/openstack/context/monasca/__init__.py b/tests/unit/plugins/openstack/context/monasca/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/context/monasca/test_metrics.py b/tests/unit/plugins/openstack/context/monasca/test_metrics.py deleted file mode 100644 index 2717b25c5d..0000000000 --- a/tests/unit/plugins/openstack/context/monasca/test_metrics.py +++ /dev/null @@ -1,100 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -import six - -from rally.plugins.openstack.context.monasca import metrics -from rally.plugins.openstack.scenarios.monasca import utils as monasca_utils -from tests.unit import test - -CTX = "rally.plugins.openstack.context.monasca" - - -class MonascaMetricGeneratorTestCase(test.TestCase): - - def _gen_tenants(self, count): - tenants = {} - for id in six.moves.range(count): - tenants[str(id)] = {"name": str(id)} - return tenants - - def _gen_context(self, tenants_count, users_per_tenant, - metrics_per_tenant): - tenants = self._gen_tenants(tenants_count) - users = [] - for id in tenants.keys(): - for i in six.moves.range(users_per_tenant): - users.append({"id": i, "tenant_id": id, - "endpoint": mock.MagicMock()}) - context = test.get_test_context() - context.update({ - "config": { - "users": { - "tenants": tenants_count, - "users_per_tenant": users_per_tenant, - "concurrent": 10, - }, - "monasca_metrics": { - "name": "fake-metric-name", - "dimensions": { - "region": "fake-region", - "service": "fake-identity", - "hostname": "fake-hostname", - "url": "fake-url" - }, - "metrics_per_tenant": metrics_per_tenant, - }, - "roles": [ - "monasca-user" - ] - }, - "admin": { - "endpoint": mock.MagicMock() - }, - "users": users, - "tenants": tenants - }) - return tenants, context - - @mock.patch("%s.metrics.rutils.interruptable_sleep" % CTX) - @mock.patch("%s.metrics.monasca_utils.MonascaScenario" % CTX) - def test_setup(self, mock_monasca_scenario, mock_interruptable_sleep): - tenants_count = 2 - users_per_tenant = 4 - metrics_per_tenant = 5 - - tenants, real_context = self._gen_context( - tenants_count, users_per_tenant, metrics_per_tenant) - - monasca_ctx = metrics.MonascaMetricGenerator(real_context) - monasca_ctx.setup() - - self.assertEqual(tenants_count, mock_monasca_scenario.call_count, - "Scenario should be constructed same times as " - "number of tenants") - self.assertEqual(metrics_per_tenant * tenants_count, - mock_monasca_scenario.return_value._create_metrics. - call_count, - "Total number of metrics created should be tenant" - "counts times metrics per tenant") - first_call = mock.call(0.001) - second_call = mock.call(monasca_utils.CONF.openstack. - monasca_metric_create_prepoll_delay, - atomic_delay=1) - self.assertEqual([first_call] * metrics_per_tenant * tenants_count + - [second_call], - mock_interruptable_sleep.call_args_list, - "Method interruptable_sleep should be called " - "tenant counts times metrics plus one") diff --git a/tests/unit/plugins/openstack/context/murano/__init__.py b/tests/unit/plugins/openstack/context/murano/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/context/murano/test_murano_environments.py b/tests/unit/plugins/openstack/context/murano/test_murano_environments.py deleted file mode 100644 index 4ea04bee4c..0000000000 --- a/tests/unit/plugins/openstack/context/murano/test_murano_environments.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.plugins.openstack.context.murano import murano_environments -from rally.plugins.openstack.scenarios.murano import utils as murano_utils -from tests.unit import test - -CTX = "rally.plugins.openstack.context.murano.murano_environments" -SCN = "rally.plugins.openstack.scenarios" - - -class MuranoEnvironmentGeneratorTestCase(test.TestCase): - - def setUp(self): - super(MuranoEnvironmentGeneratorTestCase, self).setUp() - - @staticmethod - def _get_context(): - return { - "config": { - "users": { - "tenants": 2, - "users_per_tenant": 1, - "concurrent": 1, - }, - "murano_environments": { - "environments_per_tenant": 1 - } - }, - "admin": { - "credential": mock.MagicMock() - }, - "task": mock.MagicMock(), - "owner_id": "foo_uuid", - "users": [ - { - "id": "user_0", - "tenant_id": "tenant_0", - "credential": mock.MagicMock() - }, - { - "id": "user_1", - "tenant_id": "tenant_1", - "credential": mock.MagicMock() - } - ], - "tenants": { - "tenant_0": {"name": "tenant_0_name"}, - "tenant_1": {"name": "tenant_1_name"} - } - } - - @mock.patch("%s.murano.utils.MuranoScenario._create_environment" % SCN) - def test_setup(self, mock_create_env): - murano_ctx = murano_environments.EnvironmentGenerator( - self._get_context()) - murano_ctx.setup() - - self.assertEqual(2, len(murano_ctx.context["tenants"])) - tenant_id = murano_ctx.context["users"][0]["tenant_id"] - self.assertEqual([mock_create_env.return_value], - murano_ctx.context["tenants"][tenant_id][ - "environments"]) - - @mock.patch("%s.murano.utils.MuranoScenario._create_environment" % SCN) - @mock.patch("%s.resource_manager.cleanup" % CTX) - def test_cleanup(self, mock_cleanup, mock_create_env): - murano_ctx = murano_environments.EnvironmentGenerator( - self._get_context()) - murano_ctx.setup() - murano_ctx.cleanup() - - mock_cleanup.assert_called_once_with( - names=["murano.environments"], - users=murano_ctx.context["users"], - superclass=murano_utils.MuranoScenario, - task_id="foo_uuid") diff --git a/tests/unit/plugins/openstack/context/murano/test_murano_packages.py b/tests/unit/plugins/openstack/context/murano/test_murano_packages.py deleted file mode 100644 index 200c7c60ad..0000000000 --- a/tests/unit/plugins/openstack/context/murano/test_murano_packages.py +++ /dev/null @@ -1,118 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from rally.plugins.openstack.context.murano import murano_packages -from tests.unit import test - -CTX = "rally.plugins.openstack.context.murano.murano_packages" - - -class MuranoPackageGeneratorTestCase(test.TestCase): - - def setUp(self): - super(MuranoPackageGeneratorTestCase, self).setUp() - - @staticmethod - def _get_context(): - return { - "config": { - "users": { - "tenants": 2, - "users_per_tenant": 1, - "concurrent": 1, - }, - "murano_packages": { - "app_package": ( - "rally-jobs/extra/murano/" - "applications/HelloReporter/" - "io.murano.apps.HelloReporter.zip") - } - }, - "admin": { - "credential": mock.MagicMock() - }, - "task": mock.MagicMock(), - "owner_id": "foo_uuid", - "users": [ - { - "id": "user_0", - "tenant_id": "tenant_0", - "credential": "credential" - }, - { - "id": "user_1", - "tenant_id": "tenant_1", - "credential": "credential" - } - ], - "tenants": { - "tenant_0": {"name": "tenant_0_name"}, - "tenant_1": {"name": "tenant_1_name"} - } - } - - @mock.patch("%s.osclients" % CTX) - def test_setup(self, mock_osclients): - mock_app = mock.MagicMock(id="fake_app_id") - (mock_osclients.Clients().murano(). - packages.create.return_value) = mock_app - - murano_ctx = murano_packages.PackageGenerator(self._get_context()) - murano_ctx.setup() - - self.assertEqual(2, len(murano_ctx.context["tenants"])) - tenant_id = murano_ctx.context["users"][0]["tenant_id"] - self.assertEqual([mock_app], - murano_ctx.context["tenants"][tenant_id]["packages"]) - - @mock.patch("%s.osclients" % CTX) - @mock.patch("%s.resource_manager.cleanup" % CTX) - def test_cleanup_with_zip(self, mock_cleanup, mock_osclients): - mock_app = mock.Mock(id="fake_app_id") - (mock_osclients.Clients().murano(). - packages.create.return_value) = mock_app - - murano_ctx = murano_packages.PackageGenerator(self._get_context()) - murano_ctx.setup() - murano_ctx.cleanup() - - mock_cleanup.assert_called_once_with( - names=["murano.packages"], - users=murano_ctx.context["users"], - superclass=murano_packages.PackageGenerator, - task_id="foo_uuid") - - @mock.patch("%s.osclients" % CTX) - @mock.patch("%s.resource_manager.cleanup" % CTX) - def test_cleanup_with_dir(self, mock_cleanup, mock_osclients): - mock_app = mock.Mock(id="fake_app_id") - (mock_osclients.Clients().murano(). - packages.create.return_value) = mock_app - ctx_dict = self._get_context() - app_dir = ("rally-jobs/extra/murano/applications/" - "HelloReporter/io.murano.apps.HelloReporter/") - ctx_dict["config"]["murano_packages"]["app_package"] = app_dir - - murano_ctx = murano_packages.PackageGenerator(ctx_dict) - murano_ctx.setup() - murano_ctx.cleanup() - - mock_cleanup.assert_called_once_with( - names=["murano.packages"], - users=murano_ctx.context["users"], - superclass=murano_packages.PackageGenerator, - task_id="foo_uuid") diff --git a/tests/unit/plugins/openstack/context/network/__init__.py b/tests/unit/plugins/openstack/context/network/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/context/network/test_allow_ssh.py b/tests/unit/plugins/openstack/context/network/test_allow_ssh.py deleted file mode 100644 index 86ae0424a0..0000000000 --- a/tests/unit/plugins/openstack/context/network/test_allow_ssh.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.plugins.openstack.context.network import allow_ssh -from tests.unit import test - - -CTX = "rally.plugins.openstack.context.network.allow_ssh" - - -class AllowSSHContextTestCase(test.TestCase): - - def setUp(self): - super(AllowSSHContextTestCase, self).setUp() - self.users = 2 - self.secgroup_name = "test-secgroup" - - self.ctx_with_secgroup = test.get_test_context() - self.ctx_with_secgroup.update({ - "users": [ - { - "tenant_id": "uuid1", - "credential": "credential", - "secgroup": {"id": "secgroup_id", "name": "secgroup"} - } - ] * self.users, - "admin": {"tenant_id": "uuid2", "credential": "admin_credential"}, - "tenants": {"uuid1": {"id": "uuid1", "name": "uuid1"}}, - }) - self.ctx_without_secgroup = test.get_test_context() - self.ctx_without_secgroup.update({ - "users": [{"tenant_id": "uuid1", - "credential": "credential"}, - {"tenant_id": "uuid1", - "credential": "credential"}], - "admin": {"tenant_id": "uuid2", "credential": "admin_credential"}, - "tenants": {"uuid1": {"id": "uuid1", "name": "uuid1"}}, - }) - - @mock.patch("%s.osclients.Clients" % CTX) - def test__prepare_open_secgroup_rules(self, mock_clients): - fake_neutron = mock_clients.return_value.neutron.return_value - fake_neutron.list_security_groups.return_value = { - "security_groups": [{"id": "id", "name": "foo", - "security_group_rules": []}]} - - allow_ssh._prepare_open_secgroup("credential", self.secgroup_name) - allow_ssh._prepare_open_secgroup("credential", "foo") - - @mock.patch("%s.osclients.Clients" % CTX) - @mock.patch("%s._prepare_open_secgroup" % CTX) - @mock.patch("rally.plugins.openstack.wrappers.network.wrap") - def test_secgroup_setup_cleanup_with_secgroup_supported( - self, mock_network_wrap, mock__prepare_open_secgroup, - mock_clients): - mock_network_wrapper = mock.MagicMock() - mock_network_wrapper.supports_extension.return_value = (True, "") - mock_network_wrap.return_value = mock_network_wrapper - mock__prepare_open_secgroup.return_value = { - "name": "secgroup", - "id": "secgroup_id"} - mock_clients.return_value = mock.MagicMock() - - secgrp_ctx = allow_ssh.AllowSSH(self.ctx_with_secgroup) - secgrp_ctx.setup() - self.assertEqual(self.ctx_with_secgroup, secgrp_ctx.context) - secgrp_ctx.cleanup() - - self.assertEqual( - [ - mock.call("admin_credential"), - mock.call("credential"), - mock.call().neutron(), - mock.call().neutron().delete_security_group("secgroup_id") - ], - mock_clients.mock_calls) - - mock_network_wrap.assert_called_once_with( - mock_clients.return_value, secgrp_ctx, config={}) - - @mock.patch("%s.osclients.Clients" % CTX) - @mock.patch("rally.plugins.openstack.wrappers.network.wrap") - def test_secgroup_setup_with_secgroup_unsupported( - self, mock_network_wrap, mock_clients): - mock_network_wrapper = mock.MagicMock() - mock_network_wrapper.supports_extension.return_value = ( - False, "Not supported") - mock_network_wrap.return_value = mock_network_wrapper - mock_clients.return_value = mock.MagicMock() - - secgrp_ctx = allow_ssh.AllowSSH(dict(self.ctx_without_secgroup)) - secgrp_ctx.setup() - self.assertEqual(self.ctx_without_secgroup, 
secgrp_ctx.context) - - mock_clients.assert_called_once_with("admin_credential") - - mock_network_wrap.assert_called_once_with( - mock_clients.return_value, secgrp_ctx, config={}) diff --git a/tests/unit/plugins/openstack/context/network/test_network.py b/tests/unit/plugins/openstack/context/network/test_network.py deleted file mode 100644 index daacf54cd7..0000000000 --- a/tests/unit/plugins/openstack/context/network/test_network.py +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import mock -import netaddr - -from rally.plugins.openstack.context.network import networks as network_context -from tests.unit import test - -NET = "rally.plugins.openstack.wrappers.network." - - -@ddt.ddt -class NetworkTestCase(test.TestCase): - def get_context(self, **kwargs): - return {"task": {"uuid": "foo_task"}, - "admin": {"credential": "foo_admin"}, - "config": {"network": kwargs}, - "users": [{"id": "foo_user", "tenant_id": "foo_tenant"}, - {"id": "bar_user", "tenant_id": "bar_tenant"}], - "tenants": {"foo_tenant": {"networks": [{"id": "foo_net"}]}, - "bar_tenant": {"networks": [{"id": "bar_net"}]}}} - - def test_START_CIDR_DFLT(self): - netaddr.IPNetwork(network_context.Network.DEFAULT_CONFIG["start_cidr"]) - - @mock.patch("rally.plugins.openstack.osclients.Clients") - @mock.patch(NET + "wrap", return_value="foo_service") - def test__init__default(self, mock_wrap, mock_clients): - context = network_context.Network(self.get_context()) - self.assertEqual(1, context.config["networks_per_tenant"]) - self.assertEqual(network_context.Network.DEFAULT_CONFIG["start_cidr"], - context.config["start_cidr"]) - self.assertIsNone(context.config["dns_nameservers"]) - - @mock.patch("rally.plugins.openstack.osclients.Clients") - @mock.patch(NET + "wrap", return_value="foo_service") - def test__init__explicit(self, mock_wrap, mock_clients): - context = network_context.Network( - self.get_context(start_cidr="foo_cidr", networks_per_tenant=42, - network_create_args={"fakearg": "fake"}, - dns_nameservers=["1.2.3.4", "5.6.7.8"])) - self.assertEqual(42, context.config["networks_per_tenant"]) - self.assertEqual("foo_cidr", context.config["start_cidr"]) - self.assertEqual({"fakearg": "fake"}, - context.config["network_create_args"]) - self.assertEqual(("1.2.3.4", "5.6.7.8"), - context.config["dns_nameservers"]) - - @ddt.data({}, - {"dns_nameservers": []}, - {"dns_nameservers": ["1.2.3.4", "5.6.7.8"]}) - @ddt.unpack - @mock.patch(NET + "wrap") - @mock.patch("rally.plugins.openstack.context.network.networks.utils") - @mock.patch("rally.plugins.openstack.osclients.Clients") - def test_setup(self, mock_clients, mock_utils, mock_wrap, **dns_kwargs): - mock_utils.iterate_per_tenants.return_value = [ - ("foo_user", "foo_tenant"), - ("bar_user", "bar_tenant")] - mock_create = mock.Mock(side_effect=lambda t, **kw: t + "-net") - mock_utils.generate_random_name = mock.Mock() - mock_wrap.return_value = mock.Mock(create_network=mock_create) 
- nets_per_tenant = 2 - net_context = network_context.Network( - self.get_context(networks_per_tenant=nets_per_tenant, - network_create_args={"fakearg": "fake"}, - **dns_kwargs)) - - net_context.setup() - - if "dns_nameservers" in dns_kwargs: - dns_kwargs["dns_nameservers"] = tuple( - dns_kwargs["dns_nameservers"]) - create_calls = [ - mock.call(tenant, dualstack=False, - subnets_num=1, network_create_args={"fakearg": "fake"}, - router_create_args={"external": True}, - **dns_kwargs) - for user, tenant in mock_utils.iterate_per_tenants.return_value] - mock_create.assert_has_calls(create_calls) - - mock_utils.iterate_per_tenants.assert_called_once_with( - net_context.context["users"]) - expected_networks = ["bar_tenant-net", - "foo_tenant-net"] * nets_per_tenant - actual_networks = [] - for tenant_id, tenant_ctx in net_context.context["tenants"].items(): - actual_networks.extend(tenant_ctx["networks"]) - self.assertSequenceEqual(sorted(expected_networks), - sorted(actual_networks)) - - @mock.patch("rally.plugins.openstack.osclients.Clients") - @mock.patch(NET + "wrap") - def test_cleanup(self, mock_wrap, mock_clients): - net_context = network_context.Network(self.get_context()) - net_context.cleanup() - mock_wrap().delete_network.assert_has_calls( - [mock.call({"id": "foo_net"}), mock.call({"id": "bar_net"})], - any_order=True) diff --git a/tests/unit/plugins/openstack/context/network/test_routers.py b/tests/unit/plugins/openstack/context/network/test_routers.py deleted file mode 100644 index fec83b3639..0000000000 --- a/tests/unit/plugins/openstack/context/network/test_routers.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright 2017: Orange -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import copy -import mock - -from rally.plugins.openstack.context.network import routers as router_context -from rally.plugins.openstack.scenarios.neutron import utils as neutron_utils - -from tests.unit import test - -SCN = "rally.plugins.openstack.scenarios" -CTX = "rally.plugins.openstack.context.network.routers" - - -class RouterTestCase(test.ScenarioTestCase): - - def _gen_tenants(self, count): - tenants = {} - for id_ in range(count): - tenants[str(id_)] = {"name": str(id_)} - return tenants - - def test__init__default(self): - self.context.update({ - "config": { - "router": { - "routers_per_tenant": 1, - } - } - }) - context = router_context.Router(self.context) - self.assertEqual(context.config["routers_per_tenant"], 1) - - @mock.patch("%s.neutron.utils.NeutronScenario._create_router" % SCN, - return_value={"id": "uuid"}) - def test_setup(self, mock_neutron_scenario__create_router): - tenants_count = 2 - users_per_tenant = 3 - routers_per_tenant = 2 - - tenants = self._gen_tenants(tenants_count) - users = [] - for id_ in tenants.keys(): - for i in range(users_per_tenant): - users.append({"id": i, "tenant_id": id_, - "credential": mock.MagicMock()}) - - self.context.update({ - "config": { - "users": { - "tenants": 2, - "users_per_tenant": 3, - "concurrent": 2, - }, - "router": { - "routers_per_tenant": routers_per_tenant, - } - }, - "admin": { - "credential": mock.MagicMock() - }, - "users": users, - "tenants": tenants - }) - - new_context = copy.deepcopy(self.context) - for id_ in tenants.keys(): - new_context["tenants"][id_].setdefault("routers", []) - for i in range(routers_per_tenant): - new_context["tenants"][id_]["routers"].append({"id": "uuid"}) - - routers_ctx = router_context.Router(self.context) - routers_ctx.setup() - self.assertEqual(new_context, self.context) - - @mock.patch("%s.resource_manager.cleanup" % CTX) - def test_cleanup(self, mock_cleanup): - self.context.update({"users": mock.MagicMock()}) - routers_ctx = router_context.Router(self.context) - routers_ctx.cleanup() - mock_cleanup.assert_called_once_with( - names=["neutron.router"], - users=self.context["users"], - superclass=neutron_utils.NeutronScenario, - task_id=self.context["owner_id"]) diff --git a/tests/unit/plugins/openstack/context/neutron/__init__.py b/tests/unit/plugins/openstack/context/neutron/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/context/neutron/test_existing_network.py b/tests/unit/plugins/openstack/context/neutron/test_existing_network.py deleted file mode 100644 index 48268de8af..0000000000 --- a/tests/unit/plugins/openstack/context/neutron/test_existing_network.py +++ /dev/null @@ -1,82 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from rally.plugins.openstack.context.network import existing_network -from tests.unit import test - -CTX = "rally.plugins.openstack.context.network" - - -class ExistingNetworkTestCase(test.TestCase): - - def setUp(self): - super(ExistingNetworkTestCase, self).setUp() - - self.config = {"foo": "bar"} - self.context = test.get_test_context() - self.context.update({ - "users": [ - {"id": 1, - "tenant_id": "tenant1", - "credential": mock.Mock()}, - {"id": 2, - "tenant_id": "tenant2", - "credential": mock.Mock()}, - ], - "tenants": { - "tenant1": {}, - "tenant2": {}, - }, - "config": { - "existing_network": self.config - }, - }) - - @mock.patch("rally.plugins.openstack.osclients.Clients") - @mock.patch("rally.plugins.openstack.wrappers.network.wrap") - def test_setup(self, mock_network_wrap, mock_clients): - networks = [mock.Mock(), mock.Mock(), mock.Mock()] - net_wrappers = { - "tenant1": mock.Mock( - **{"list_networks.return_value": networks[0:2]}), - "tenant2": mock.Mock( - **{"list_networks.return_value": networks[2:]}) - } - mock_network_wrap.side_effect = [net_wrappers["tenant1"], - net_wrappers["tenant2"]] - - context = existing_network.ExistingNetwork(self.context) - context.setup() - - mock_clients.assert_has_calls([ - mock.call(u["credential"]) for u in self.context["users"]]) - mock_network_wrap.assert_has_calls([ - mock.call(mock_clients.return_value, context, config=self.config), - mock.call(mock_clients.return_value, context, config=self.config)]) - for net_wrapper in net_wrappers.values(): - net_wrapper.list_networks.assert_called_once_with() - - self.assertEqual( - self.context["tenants"], - { - "tenant1": {"networks": networks[0:2]}, - "tenant2": {"networks": networks[2:]}, - } - ) - - def test_cleanup(self): - # NOTE(stpierre): Test that cleanup is not abstract - existing_network.ExistingNetwork({"task": mock.MagicMock()}).cleanup() diff --git a/tests/unit/plugins/openstack/context/neutron/test_lbaas.py b/tests/unit/plugins/openstack/context/neutron/test_lbaas.py deleted file mode 100644 index 3700113ad9..0000000000 --- a/tests/unit/plugins/openstack/context/neutron/test_lbaas.py +++ /dev/null @@ -1,159 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.plugins.openstack.context.neutron import lbaas as lbaas_context -from tests.unit import test - -NET = "rally.plugins.openstack.wrappers.network." 
- - -class LbaasTestCase(test.TestCase): - def get_context(self, **kwargs): - foo_tenant = {"networks": [{"id": "foo_net", - "tenant_id": "foo_tenant", - "subnets": ["foo_subnet"]}]} - bar_tenant = {"networks": [{"id": "bar_net", - "tenant_id": "bar_tenant", - "subnets": ["bar_subnet"]}]} - return {"task": {"uuid": "foo_task"}, - "admin": {"credential": "foo_admin"}, - "users": [{"id": "foo_user", "tenant_id": "foo_tenant"}, - {"id": "bar_user", "tenant_id": "bar_tenant"}], - "config": {"lbaas": kwargs}, - "tenants": {"foo_tenant": foo_tenant, - "bar_tenant": bar_tenant}} - - @mock.patch("rally.plugins.openstack.osclients.Clients") - @mock.patch(NET + "wrap", return_value="foo_service") - def test__init__default(self, mock_wrap, mock_clients): - context = lbaas_context.Lbaas(self.get_context()) - self.assertEqual( - context.config["pool"]["lb_method"], - lbaas_context.Lbaas.DEFAULT_CONFIG["pool"]["lb_method"]) - self.assertEqual( - context.config["pool"]["protocol"], - lbaas_context.Lbaas.DEFAULT_CONFIG["pool"]["protocol"]) - self.assertEqual( - context.config["lbaas_version"], - lbaas_context.Lbaas.DEFAULT_CONFIG["lbaas_version"]) - - @mock.patch("rally.plugins.openstack.osclients.Clients") - @mock.patch(NET + "wrap", return_value="foo_service") - def test__init__explicit(self, mock_wrap, mock_clients): - context = lbaas_context.Lbaas( - self.get_context(pool={"lb_method": "LEAST_CONNECTIONS"})) - self.assertEqual(context.config["pool"]["lb_method"], - "LEAST_CONNECTIONS") - - @mock.patch(NET + "wrap") - @mock.patch("rally.plugins.openstack.context.neutron.lbaas.utils") - @mock.patch("rally.plugins.openstack.osclients.Clients") - def test_setup_with_lbaas(self, mock_clients, mock_utils, mock_wrap): - mock_utils.iterate_per_tenants.return_value = [ - ("foo_user", "foo_tenant"), - ("bar_user", "bar_tenant")] - foo_net = {"id": "foo_net", - "tenant_id": "foo_tenant", - "subnets": ["foo_subnet"], - "lb_pools": [{"pool": {"id": "foo_pool", - "tenant_id": "foo_tenant"}}]} - bar_net = {"id": "bar_net", - "tenant_id": "bar_tenant", - "subnets": ["bar_subnet"], - "lb_pools": [{"pool": {"id": "bar_pool", - "tenant_id": "bar_tenant"}}]} - expected_net = [bar_net, foo_net] - mock_create = mock.Mock( - side_effect=lambda t, s, - **kw: {"pool": {"id": str(t.split("_")[0]) + "_pool", - "tenant_id": t}}) - actual_net = [] - mock_wrap.return_value = mock.Mock(create_v1_pool=mock_create) - net_wrapper = mock_wrap(mock_clients.return_value) - net_wrapper.supports_extension.return_value = (True, None) - fake_args = {"lbaas_version": 1} - lb_context = lbaas_context.Lbaas(self.get_context(**fake_args)) - lb_context.setup() - mock_utils.iterate_per_tenants.assert_called_once_with( - lb_context.context["users"]) - net_wrapper.supports_extension.assert_called_once_with("lbaas") - for tenant_id, tenant_ctx in ( - sorted(lb_context.context["tenants"].items())): - for network in tenant_ctx["networks"]: - actual_net.append(network) - self.assertEqual(expected_net, actual_net) - - @mock.patch(NET + "wrap") - @mock.patch("rally.plugins.openstack.context.neutron.lbaas.utils") - @mock.patch("rally.plugins.openstack.osclients.Clients") - def test_setup_with_no_lbaas(self, mock_clients, mock_utils, mock_wrap): - mock_utils.iterate_per_tenants.return_value = [ - ("bar_user", "bar_tenant")] - mock_create = mock.Mock(side_effect=lambda t, **kw: t + "-net") - mock_wrap.return_value = mock.Mock(create_v1_pool=mock_create) - fake_args = {"lbaas_version": 1} - lb_context = lbaas_context.Lbaas(self.get_context(**fake_args)) - 
net_wrapper = mock_wrap(mock_clients.return_value) - net_wrapper.supports_extension.return_value = (False, None) - lb_context.setup() - mock_utils.iterate_per_tenants.assert_not_called() - net_wrapper.supports_extension.assert_called_once_with("lbaas") - assert not net_wrapper.create_v1_pool.called - - @mock.patch(NET + "wrap") - @mock.patch("rally.plugins.openstack.context.neutron.lbaas.utils") - @mock.patch("rally.plugins.openstack.osclients.Clients") - def test_setup_with_lbaas_version_not_one(self, mock_clients, - mock_utils, mock_wrap): - mock_utils.iterate_per_tenants.return_value = [ - ("bar_user", "bar_tenant")] - mock_create = mock.Mock(side_effect=lambda t, **kw: t + "-net") - mock_wrap.return_value = mock.Mock(create_v1_pool=mock_create) - fake_args = {"lbaas_version": 2} - lb_context = lbaas_context.Lbaas(self.get_context(**fake_args)) - net_wrapper = mock_wrap(mock_clients.return_value) - net_wrapper.supports_extension.return_value = (True, None) - self.assertRaises(NotImplementedError, lb_context.setup) - - @mock.patch("rally.plugins.openstack.osclients.Clients") - @mock.patch(NET + "wrap") - def test_cleanup(self, mock_wrap, mock_clients): - net_wrapper = mock_wrap(mock_clients.return_value) - lb_context = lbaas_context.Lbaas(self.get_context()) - expected_pools = [] - for tenant_id, tenant_ctx in lb_context.context["tenants"].items(): - resultant_pool = {"pool": { - "id": str(tenant_id.split("_")[0]) + "_pool"}} - expected_pools.append(resultant_pool) - for network in ( - lb_context.context["tenants"][tenant_id]["networks"]): - network.setdefault("lb_pools", []).append(resultant_pool) - lb_context.cleanup() - net_wrapper.delete_v1_pool.assert_has_calls( - [mock.call(pool["pool"]["id"]) for pool in expected_pools]) - - @mock.patch("rally.plugins.openstack.osclients.Clients") - @mock.patch(NET + "wrap") - def test_cleanup_lbaas_version_not_one(self, mock_wrap, mock_clients): - fakeargs = {"lbaas_version": 2} - net_wrapper = mock_wrap(mock_clients.return_value) - lb_context = lbaas_context.Lbaas(self.get_context(**fakeargs)) - for tenant_id, tenant_ctx in lb_context.context["tenants"].items(): - resultant_pool = {"pool": { - "id": str(tenant_id.split("_")[0]) + "_pool"}} - for network in ( - lb_context.context["tenants"][tenant_id]["networks"]): - network.setdefault("lb_pools", []).append(resultant_pool) - lb_context.cleanup() - assert not net_wrapper.delete_v1_pool.called diff --git a/tests/unit/plugins/openstack/context/nova/__init__.py b/tests/unit/plugins/openstack/context/nova/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/context/nova/test_flavors.py b/tests/unit/plugins/openstack/context/nova/test_flavors.py deleted file mode 100644 index e0e406ee21..0000000000 --- a/tests/unit/plugins/openstack/context/nova/test_flavors.py +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import copy - -import mock -from novaclient import exceptions as nova_exceptions - -from rally.plugins.openstack.context.nova import flavors -from tests.unit import test - -CTX = "rally.plugins.openstack.context.nova" - - -class FlavorsGeneratorTestCase(test.TestCase): - - def setUp(self): - super(FlavorsGeneratorTestCase, self).setUp() - self.context = { - "config": { - "flavors": [{ - "name": "flavor_name", - "ram": 2048, - "disk": 10, - "vcpus": 3, - "ephemeral": 3, - "swap": 5, - "extra_specs": { - "key": "value" - } - }] - }, - "admin": { - "credential": mock.MagicMock() - }, - "task": mock.MagicMock(), - } - - @mock.patch("%s.flavors.osclients.Clients" % CTX) - def test_setup(self, mock_clients): - # Setup and mock - mock_create = mock_clients().nova().flavors.create - mock_create().to_dict.return_value = {"flavor_key": "flavor_value"} - - # Run - flavors_ctx = flavors.FlavorsGenerator(self.context) - flavors_ctx.setup() - - # Assertions - self.assertEqual({"flavor_name": {"flavor_key": "flavor_value"}}, - flavors_ctx.context["flavors"]) - - mock_clients.assert_called_with(self.context["admin"]["credential"]) - - mock_create.assert_called_with( - name="flavor_name", ram=2048, vcpus=3, - disk=10, ephemeral=3, swap=5) - mock_create().set_keys.assert_called_with({"key": "value"}) - mock_create().to_dict.assert_called_with() - - @mock.patch("%s.flavors.osclients.Clients" % CTX) - def test_setup_failexists(self, mock_clients): - # Setup and mock - new_context = copy.deepcopy(self.context) - new_context["flavors"] = {} - - mock_flavor_create = mock_clients().nova().flavors.create - - exception = nova_exceptions.Conflict("conflict") - mock_flavor_create.side_effect = exception - - # Run - flavors_ctx = flavors.FlavorsGenerator(self.context) - flavors_ctx.setup() - - # Assertions - self.assertEqual(new_context, flavors_ctx.context) - - mock_clients.assert_called_with(self.context["admin"]["credential"]) - - mock_flavor_create.assert_called_once_with( - name="flavor_name", ram=2048, vcpus=3, - disk=10, ephemeral=3, swap=5) - - @mock.patch("%s.flavors.rutils.make_name_matcher" % CTX) - @mock.patch("%s.flavors.resource_manager.cleanup" % CTX) - def test_cleanup(self, mock_cleanup, mock_make_name_matcher): - # Setup and mock - real_context = { - "config": { - "flavors": [ - {"name": "flavor_name"}, - ] - }, - "admin": { - "credential": mock.MagicMock() - }, - "task": mock.MagicMock(), - } - - # Run - flavors_ctx = flavors.FlavorsGenerator(real_context) - flavors_ctx.cleanup() - - mock_cleanup.assert_called_once_with( - names=["nova.flavors"], - admin=real_context["admin"], - api_versions=None, - superclass=mock_make_name_matcher.return_value, - task_id=flavors_ctx.get_owner_id()) - - mock_make_name_matcher.assert_called_once_with("flavor_name") diff --git a/tests/unit/plugins/openstack/context/nova/test_keypairs.py b/tests/unit/plugins/openstack/context/nova/test_keypairs.py deleted file mode 100644 index 94c5e377a2..0000000000 --- a/tests/unit/plugins/openstack/context/nova/test_keypairs.py +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright 2014: Rackspace UK -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.plugins.openstack.context.nova import keypairs -from tests.unit import test - -CTX = "rally.plugins.openstack.context.nova" - - -class KeyPairContextTestCase(test.TestCase): - - def setUp(self): - super(KeyPairContextTestCase, self).setUp() - self.users = 2 - - task = {"uuid": "foo_task_id"} - self.ctx_with_keys = { - "users": [ - { - "keypair": { - "id": "key_id_1", - "key": "key_1", - "name": "key_name_1" - }, - "credential": "credential_1" - }, - { - "keypair": { - "id": "key_id_2", - "key": "key_2", - "name": "key_name_2" - }, - "credential": "credential_2" - }, - ], - "task": task - } - self.ctx_without_keys = { - "users": [{"credential": "credential_1"}, - {"credential": "credential_2"}], - "task": task - } - - def test_keypair_setup(self): - keypair_ctx = keypairs.Keypair(self.ctx_without_keys) - keypair_ctx._generate_keypair = mock.Mock(side_effect=[ - {"id": "key_id_1", "key": "key_1", "name": "key_name_1"}, - {"id": "key_id_2", "key": "key_2", "name": "key_name_2"}, - ]) - - keypair_ctx.setup() - self.assertEqual(keypair_ctx.context, self.ctx_with_keys) - - keypair_ctx._generate_keypair.assert_has_calls( - [mock.call("credential_1"), mock.call("credential_2")]) - - @mock.patch("%s.keypairs.resource_manager.cleanup" % CTX) - def test_keypair_cleanup(self, mock_cleanup): - keypair_ctx = keypairs.Keypair(self.ctx_with_keys) - keypair_ctx.cleanup() - mock_cleanup.assert_called_once_with( - names=["nova.keypairs"], - users=self.ctx_with_keys["users"], - superclass=keypairs.Keypair, - task_id=self.ctx_with_keys["task"]["uuid"]) - - @mock.patch("rally.plugins.openstack.osclients.Clients") - def test_keypair_generate(self, mock_clients): - mock_keypairs = mock_clients.return_value.nova.return_value.keypairs - mock_keypair = mock_keypairs.create.return_value - mock_keypair.public_key = "public_key" - mock_keypair.private_key = "private_key" - mock_keypair.id = "key_id" - keypair_ctx = keypairs.Keypair(self.ctx_without_keys) - keypair_ctx.generate_random_name = mock.Mock() - - key = keypair_ctx._generate_keypair("credential") - - self.assertEqual({ - "id": "key_id", - "name": keypair_ctx.generate_random_name.return_value, - "private": "private_key", - "public": "public_key" - }, key) - - mock_clients.assert_has_calls([ - mock.call().nova().keypairs.create( - keypair_ctx.generate_random_name.return_value), - ]) diff --git a/tests/unit/plugins/openstack/context/nova/test_servers.py b/tests/unit/plugins/openstack/context/nova/test_servers.py deleted file mode 100755 index c0b6834e74..0000000000 --- a/tests/unit/plugins/openstack/context/nova/test_servers.py +++ /dev/null @@ -1,173 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - - -import copy - -import mock - -from rally.plugins.openstack.context.nova import servers -from rally.plugins.openstack.scenarios.nova import utils as nova_utils -from tests.unit import fakes -from tests.unit import test - -CTX = "rally.plugins.openstack.context.nova" -SCN = "rally.plugins.openstack.scenarios" -TYP = "rally.plugins.openstack.types" - - -class ServerGeneratorTestCase(test.ScenarioTestCase): - - def _gen_tenants(self, count): - tenants = {} - for id_ in range(count): - tenants[str(id_)] = {"name": str(id_)} - return tenants - - def test_init(self): - tenants_count = 2 - servers_per_tenant = 5 - self.context.update({ - "config": { - "servers": { - "servers_per_tenant": servers_per_tenant, - } - }, - "tenants": self._gen_tenants(tenants_count)}) - - inst = servers.ServerGenerator(self.context) - self.assertEqual({"auto_assign_nic": False, "servers_per_tenant": 5}, - inst.config) - - @mock.patch("%s.nova.utils.NovaScenario._boot_servers" % SCN, - return_value=[ - fakes.FakeServer(id="uuid"), - fakes.FakeServer(id="uuid"), - fakes.FakeServer(id="uuid"), - fakes.FakeServer(id="uuid"), - fakes.FakeServer(id="uuid") - ]) - @mock.patch("%s.GlanceImage" % TYP) - @mock.patch("%s.Flavor" % TYP) - def test_setup(self, mock_flavor, mock_glance_image, - mock_nova_scenario__boot_servers): - - tenants_count = 2 - users_per_tenant = 5 - servers_per_tenant = 5 - - tenants = self._gen_tenants(tenants_count) - users = [] - for id_ in tenants.keys(): - for i in range(users_per_tenant): - users.append({"id": i, "tenant_id": id_, - "credential": mock.MagicMock()}) - - self.context.update({ - "config": { - "users": { - "tenants": 2, - "users_per_tenant": 5, - "concurrent": 10, - }, - "servers": { - "auto_assign_nic": True, - "servers_per_tenant": 5, - "image": { - "name": "cirros-0.3.4-x86_64-uec", - }, - "flavor": { - "name": "m1.tiny", - }, - "nics": ["foo", "bar"] - }, - }, - "admin": { - "credential": mock.MagicMock() - }, - "users": users, - "tenants": tenants - }) - - new_context = copy.deepcopy(self.context) - for id_ in new_context["tenants"]: - new_context["tenants"][id_].setdefault("servers", []) - for i in range(servers_per_tenant): - new_context["tenants"][id_]["servers"].append("uuid") - - servers_ctx = servers.ServerGenerator(self.context) - servers_ctx.setup() - self.assertEqual(new_context, self.context) - image_id = mock_glance_image.return_value.pre_process.return_value - flavor_id = mock_flavor.return_value.pre_process.return_value - servers_ctx_config = self.context["config"]["servers"] - expected_auto_nic = servers_ctx_config.get("auto_assign_nic", False) - expected_requests = servers_ctx_config.get("servers_per_tenant", False) - called_times = len(tenants) - mock_calls = [mock.call(image_id, flavor_id, - auto_assign_nic=expected_auto_nic, - nics=[{"net-id": "foo"}, {"net-id": "bar"}], - requests=expected_requests) - for i in range(called_times)] - mock_nova_scenario__boot_servers.assert_has_calls(mock_calls) - - @mock.patch("%s.servers.resource_manager.cleanup" % CTX) - def test_cleanup(self, mock_cleanup): - - tenants_count = 2 - users_per_tenant = 5 - servers_per_tenant = 5 - - tenants = self._gen_tenants(tenants_count) - users = [] - for id_ in tenants.keys(): - for i in range(users_per_tenant): - users.append({"id": i, "tenant_id": id_, - "credential": "credential"}) - tenants[id_].setdefault("servers", []) - for j in range(servers_per_tenant): - 
tenants[id_]["servers"].append("uuid") - - self.context.update({ - "config": { - "users": { - "tenants": 2, - "users_per_tenant": 5, - "concurrent": 10, - }, - "servers": { - "servers_per_tenant": 5, - "image": { - "name": "cirros-0.3.4-x86_64-uec", - }, - "flavor": { - "name": "m1.tiny", - }, - }, - }, - "admin": { - "credential": mock.MagicMock() - }, - "users": users, - "tenants": tenants - }) - - servers_ctx = servers.ServerGenerator(self.context) - servers_ctx.cleanup() - - mock_cleanup.assert_called_once_with( - names=["nova.servers"], - users=self.context["users"], - superclass=nova_utils.NovaScenario, - task_id=self.context["owner_id"]) diff --git a/tests/unit/plugins/openstack/context/quotas/__init__.py b/tests/unit/plugins/openstack/context/quotas/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/context/quotas/test_cinder_quotas.py b/tests/unit/plugins/openstack/context/quotas/test_cinder_quotas.py deleted file mode 100644 index 80632a5052..0000000000 --- a/tests/unit/plugins/openstack/context/quotas/test_cinder_quotas.py +++ /dev/null @@ -1,58 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.plugins.openstack.context.quotas import cinder_quotas -from tests.unit import test - - -class CinderQuotasTestCase(test.TestCase): - - def test_update(self): - mock_clients = mock.MagicMock() - cinder_quo = cinder_quotas.CinderQuotas(mock_clients) - tenant_id = mock.MagicMock() - quotas_values = { - "volumes": 10, - "snapshots": 50, - "backups": 20, - "backup_gigabytes": 1000, - "gigabytes": 1000 - } - cinder_quo.update(tenant_id, **quotas_values) - mock_clients.cinder().quotas.update.assert_called_once_with( - tenant_id, **quotas_values) - - def test_delete(self): - mock_clients = mock.MagicMock() - cinder_quo = cinder_quotas.CinderQuotas(mock_clients) - tenant_id = mock.MagicMock() - cinder_quo.delete(tenant_id) - mock_clients.cinder().quotas.delete.assert_called_once_with(tenant_id) - - def test_get(self): - tenant_id = "tenant_id" - quotas = {"gigabytes": "gb", - "snapshots": "ss", - "volumes": "v", - "backups": "b", - "backup_gigabytes": "b_g"} - quota_set = mock.MagicMock(**quotas) - clients = mock.MagicMock() - clients.cinder.return_value.quotas.get.return_value = quota_set - cinder_quo = cinder_quotas.CinderQuotas(clients) - - self.assertEqual(quotas, cinder_quo.get(tenant_id)) - clients.cinder().quotas.get.assert_called_once_with(tenant_id) diff --git a/tests/unit/plugins/openstack/context/quotas/test_designate_quotas.py b/tests/unit/plugins/openstack/context/quotas/test_designate_quotas.py deleted file mode 100644 index 9780dcd2f8..0000000000 --- a/tests/unit/plugins/openstack/context/quotas/test_designate_quotas.py +++ /dev/null @@ -1,53 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.plugins.openstack.context.quotas import designate_quotas -from tests.unit import test - - -class DesignateQuotasTestCase(test.TestCase): - - def test_update(self): - clients = mock.MagicMock() - quotas = designate_quotas.DesignateQuotas(clients) - tenant_id = mock.MagicMock() - quotas_values = { - "domains": 5, - "domain_recordsets": 20, - "domain_records": 20, - "recordset_records": 20, - } - quotas.update(tenant_id, **quotas_values) - clients.designate().quotas.update.assert_called_once_with( - tenant_id, quotas_values) - - def test_delete(self): - clients = mock.MagicMock() - quotas = designate_quotas.DesignateQuotas(clients) - tenant_id = mock.MagicMock() - quotas.delete(tenant_id) - clients.designate().quotas.reset.assert_called_once_with(tenant_id) - - def test_get(self): - tenant_id = "tenant_id" - quotas = {"domains": -1, "domain_recordsets": 2, "domain_records": 3, - "recordset_records": 3} - clients = mock.MagicMock() - clients.designate.return_value.quotas.get.return_value = quotas - designate_quo = designate_quotas.DesignateQuotas(clients) - - self.assertEqual(quotas, designate_quo.get(tenant_id)) - clients.designate().quotas.get.assert_called_once_with(tenant_id) diff --git a/tests/unit/plugins/openstack/context/quotas/test_manila_quotas.py b/tests/unit/plugins/openstack/context/quotas/test_manila_quotas.py deleted file mode 100644 index 224ad75b52..0000000000 --- a/tests/unit/plugins/openstack/context/quotas/test_manila_quotas.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright 2015 Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from rally.plugins.openstack.context.quotas import manila_quotas -from tests.unit import test - - -class ManilaQuotasTestCase(test.TestCase): - - def test_update(self): - clients = mock.MagicMock() - instance = manila_quotas.ManilaQuotas(clients) - tenant_id = mock.MagicMock() - quotas_values = { - "shares": 10, - "gigabytes": 13, - "snapshots": 7, - "snapshot_gigabytes": 51, - "share_networks": 1014, - } - - instance.update(tenant_id, **quotas_values) - - clients.manila.return_value.quotas.update.assert_called_once_with( - tenant_id, **quotas_values) - - def test_delete(self): - clients = mock.MagicMock() - instance = manila_quotas.ManilaQuotas(clients) - tenant_id = mock.MagicMock() - - instance.delete(tenant_id) - - clients.manila.return_value.quotas.delete.assert_called_once_with( - tenant_id) - - def test_get(self): - tenant_id = "tenant_id" - quotas = {"gigabytes": "gb", "snapshots": "ss", "shares": "v", - "snapshot_gigabytes": "sg", "share_networks": "sn"} - quota_set = mock.MagicMock(**quotas) - clients = mock.MagicMock() - clients.manila.return_value.quotas.get.return_value = quota_set - manila_quo = manila_quotas.ManilaQuotas(clients) - - self.assertEqual(quotas, manila_quo.get(tenant_id)) - clients.manila().quotas.get.assert_called_once_with(tenant_id) diff --git a/tests/unit/plugins/openstack/context/quotas/test_neutron_quotas.py b/tests/unit/plugins/openstack/context/quotas/test_neutron_quotas.py deleted file mode 100644 index 9a57c0e030..0000000000 --- a/tests/unit/plugins/openstack/context/quotas/test_neutron_quotas.py +++ /dev/null @@ -1,58 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from rally.plugins.openstack.context.quotas import neutron_quotas -from tests.unit import test - - -class NeutronQuotasTestCase(test.TestCase): - def setUp(self): - super(NeutronQuotasTestCase, self).setUp() - self.quotas = { - "network": 20, - "subnet": 20, - "port": 100, - "router": 20, - "floatingip": 100, - "security_group": 100, - "security_group_rule": 100 - } - - def test_update(self): - clients = mock.MagicMock() - neutron_quo = neutron_quotas.NeutronQuotas(clients) - tenant_id = mock.MagicMock() - neutron_quo.update(tenant_id, **self.quotas) - body = {"quota": self.quotas} - clients.neutron().update_quota.assert_called_once_with(tenant_id, - body=body) - - def test_delete(self): - clients = mock.MagicMock() - neutron_quo = neutron_quotas.NeutronQuotas(clients) - tenant_id = mock.MagicMock() - neutron_quo.delete(tenant_id) - clients.neutron().delete_quota.assert_called_once_with(tenant_id) - - def test_get(self): - tenant_id = "tenant_id" - clients = mock.MagicMock() - clients.neutron.return_value.show_quota.return_value = { - "quota": self.quotas} - neutron_quo = neutron_quotas.NeutronQuotas(clients) - - self.assertEqual(self.quotas, neutron_quo.get(tenant_id)) - clients.neutron().show_quota.assert_called_once_with(tenant_id) diff --git a/tests/unit/plugins/openstack/context/quotas/test_nova_quotas.py b/tests/unit/plugins/openstack/context/quotas/test_nova_quotas.py deleted file mode 100644 index f32fb21722..0000000000 --- a/tests/unit/plugins/openstack/context/quotas/test_nova_quotas.py +++ /dev/null @@ -1,65 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from rally.plugins.openstack.context.quotas import nova_quotas -from tests.unit import test - - -class NovaQuotasTestCase(test.TestCase): - - def setUp(self): - super(NovaQuotasTestCase, self).setUp() - self.quotas = { - "instances": 10, - "cores": 100, - "ram": 100000, - "floating_ips": 100, - "fixed_ips": 10000, - "metadata_items": 5, - "injected_files": 5, - "injected_file_content_bytes": 2048, - "injected_file_path_bytes": 1024, - "key_pairs": 50, - "security_groups": 50, - "security_group_rules": 50, - "server_group_members": 777, - "server_groups": 33 - } - - def test_update(self): - clients = mock.MagicMock() - nova_quo = nova_quotas.NovaQuotas(clients) - tenant_id = mock.MagicMock() - nova_quo.update(tenant_id, **self.quotas) - clients.nova().quotas.update.assert_called_once_with(tenant_id, - **self.quotas) - - def test_delete(self): - clients = mock.MagicMock() - nova_quo = nova_quotas.NovaQuotas(clients) - tenant_id = mock.MagicMock() - nova_quo.delete(tenant_id) - clients.nova().quotas.delete.assert_called_once_with(tenant_id) - - def test_get(self): - tenant_id = "tenant_id" - quota_set = mock.MagicMock(**self.quotas) - clients = mock.MagicMock() - clients.nova.return_value.quotas.get.return_value = quota_set - nova_quo = nova_quotas.NovaQuotas(clients) - - self.assertEqual(self.quotas, nova_quo.get(tenant_id)) - clients.nova().quotas.get.assert_called_once_with(tenant_id) diff --git a/tests/unit/plugins/openstack/context/quotas/test_quotas.py b/tests/unit/plugins/openstack/context/quotas/test_quotas.py deleted file mode 100644 index a484c8b704..0000000000 --- a/tests/unit/plugins/openstack/context/quotas/test_quotas.py +++ /dev/null @@ -1,281 +0,0 @@ -# Copyright 2014: Dassault Systemes -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import copy - -import ddt -import mock - -from rally.common import logging -from rally.plugins.openstack.context.quotas import quotas -from rally.task import context -from tests.unit import test - -QUOTAS_PATH = "rally.plugins.openstack.context.quotas" - - -@ddt.ddt -class QuotasTestCase(test.TestCase): - - def setUp(self): - super(QuotasTestCase, self).setUp() - self.unlimited = -1 - self.context = { - "config": { - }, - "tenants": { - "t1": {"credential": mock.MagicMock()}, - "t2": {"credential": mock.MagicMock()}}, - "admin": {"credential": mock.MagicMock()}, - "task": mock.MagicMock() - } - - @ddt.data(("cinder", "backup_gigabytes"), - ("cinder", "backups"), - ("cinder", "gigabytes"), - ("cinder", "snapshots"), - ("cinder", "volumes"), - ("manila", "gigabytes"), - ("manila", "share_networks"), - ("manila", "shares"), - ("manila", "snapshot_gigabytes"), - ("manila", "snapshots"), - ("neutron", "floatingip"), - ("neutron", "health_monitor"), - ("neutron", "network"), - ("neutron", "pool"), - ("neutron", "port"), - ("neutron", "router"), - ("neutron", "security_group"), - ("neutron", "security_group_rule"), - ("neutron", "subnet"), - ("neutron", "vip"), - ("nova", "cores"), - ("nova", "fixed_ips"), - ("nova", "floating_ips"), - ("nova", "injected_file_content_bytes"), - ("nova", "injected_file_path_bytes"), - ("nova", "injected_files"), - ("nova", "instances"), - ("nova", "key_pairs"), - ("nova", "metadata_items"), - ("nova", "ram"), - ("nova", "security_group_rules"), - ("nova", "security_groups"), - ("nova", "server_group_members"), - ("nova", "server_groups")) - @ddt.unpack - def test_validate(self, group, parameter): - configs = [ - ({group: {parameter: self.unlimited}}, True), - ({group: {parameter: 0}}, True), - ({group: {parameter: 10000}}, True), - ({group: {parameter: 2.5}}, False), - ({group: {parameter: "-1"}}, False), - ({group: {parameter: -2}}, False), - ] - for config, valid in configs: - results = context.Context.validate( - "quotas", None, None, config, vtype="syntax") - if valid: - self.assertEqual([], results) - else: - self.assertGreater(len(results), 0) - - @mock.patch("%s.quotas.osclients.Clients" % QUOTAS_PATH) - @mock.patch("%s.cinder_quotas.CinderQuotas" % QUOTAS_PATH) - @ddt.data(True, False) - def test_cinder_quotas(self, ex_users, mock_cinder_quotas, mock_clients): - cinder_quo = mock_cinder_quotas.return_value - ctx = copy.deepcopy(self.context) - if ex_users: - ctx["existing_users"] = None - ctx["config"]["quotas"] = { - "cinder": { - "volumes": self.unlimited, - "snapshots": self.unlimited, - "gigabytes": self.unlimited - } - } - - tenants = ctx["tenants"] - cinder_quotas = ctx["config"]["quotas"]["cinder"] - cinder_quo.get.return_value = cinder_quotas - with quotas.Quotas(ctx) as quotas_ctx: - quotas_ctx.setup() - if ex_users: - self.assertEqual([mock.call(tenant) for tenant in tenants], - cinder_quo.get.call_args_list) - self.assertEqual([mock.call(tenant, **cinder_quotas) - for tenant in tenants], - cinder_quo.update.call_args_list) - mock_cinder_quotas.reset_mock() - - if ex_users: - self.assertEqual([mock.call(tenant, **cinder_quotas) - for tenant in tenants], - cinder_quo.update.call_args_list) - else: - self.assertEqual([mock.call(tenant) for tenant in tenants], - cinder_quo.delete.call_args_list) - - @mock.patch("%s.quotas.osclients.Clients" % QUOTAS_PATH) - @mock.patch("%s.nova_quotas.NovaQuotas" % QUOTAS_PATH) - @ddt.data(True, False) - def test_nova_quotas(self, ex_users, mock_nova_quotas, mock_clients): - nova_quo = 
mock_nova_quotas.return_value - ctx = copy.deepcopy(self.context) - if ex_users: - ctx["existing_users"] = None - - ctx["config"]["quotas"] = { - "nova": { - "instances": self.unlimited, - "cores": self.unlimited, - "ram": self.unlimited, - "floating-ips": self.unlimited, - "fixed-ips": self.unlimited, - "metadata_items": self.unlimited, - "injected_files": self.unlimited, - "injected_file_content_bytes": self.unlimited, - "injected_file_path_bytes": self.unlimited, - "key_pairs": self.unlimited, - "security_groups": self.unlimited, - "security_group_rules": self.unlimited, - } - } - - tenants = ctx["tenants"] - nova_quotas = ctx["config"]["quotas"]["nova"] - nova_quo.get.return_value = nova_quotas - with quotas.Quotas(ctx) as quotas_ctx: - quotas_ctx.setup() - if ex_users: - self.assertEqual([mock.call(tenant) for tenant in tenants], - nova_quo.get.call_args_list) - self.assertEqual([mock.call(tenant, **nova_quotas) - for tenant in tenants], - nova_quo.update.call_args_list) - mock_nova_quotas.reset_mock() - - if ex_users: - self.assertEqual([mock.call(tenant, **nova_quotas) - for tenant in tenants], - nova_quo.update.call_args_list) - else: - self.assertEqual([mock.call(tenant) for tenant in tenants], - nova_quo.delete.call_args_list) - - @mock.patch("%s.quotas.osclients.Clients" % QUOTAS_PATH) - @mock.patch("%s.neutron_quotas.NeutronQuotas" % QUOTAS_PATH) - @ddt.data(True, False) - def test_neutron_quotas(self, ex_users, mock_neutron_quotas, mock_clients): - neutron_quo = mock_neutron_quotas.return_value - ctx = copy.deepcopy(self.context) - if ex_users: - ctx["existing_users"] = None - - ctx["config"]["quotas"] = { - "neutron": { - "network": self.unlimited, - "subnet": self.unlimited, - "port": self.unlimited, - "router": self.unlimited, - "floatingip": self.unlimited, - "security_group": self.unlimited, - "security_group_rule": self.unlimited - } - } - - tenants = ctx["tenants"] - neutron_quotas = ctx["config"]["quotas"]["neutron"] - neutron_quo.get.return_value = neutron_quotas - with quotas.Quotas(ctx) as quotas_ctx: - quotas_ctx.setup() - if ex_users: - self.assertEqual([mock.call(tenant) for tenant in tenants], - neutron_quo.get.call_args_list) - self.assertEqual([mock.call(tenant, **neutron_quotas) - for tenant in tenants], - neutron_quo.update.call_args_list) - neutron_quo.reset_mock() - - if ex_users: - self.assertEqual([mock.call(tenant, **neutron_quotas) - for tenant in tenants], - neutron_quo.update.call_args_list) - else: - self.assertEqual([mock.call(tenant) for tenant in tenants], - neutron_quo.delete.call_args_list) - - @mock.patch("rally.plugins.openstack.context." - "quotas.quotas.osclients.Clients") - @mock.patch("rally.plugins.openstack.context." - "quotas.nova_quotas.NovaQuotas") - @mock.patch("rally.plugins.openstack.context." - "quotas.cinder_quotas.CinderQuotas") - @mock.patch("rally.plugins.openstack.context." 
- "quotas.neutron_quotas.NeutronQuotas") - def test_no_quotas(self, mock_neutron_quotas, mock_cinder_quotas, - mock_nova_quotas, mock_clients): - ctx = copy.deepcopy(self.context) - if "quotas" in ctx["config"]: - del ctx["config"]["quotas"] - - with quotas.Quotas(ctx) as quotas_ctx: - quotas_ctx.setup() - self.assertFalse(mock_cinder_quotas.update.called) - self.assertFalse(mock_nova_quotas.update.called) - self.assertFalse(mock_neutron_quotas.update.called) - - self.assertFalse(mock_cinder_quotas.delete.called) - self.assertFalse(mock_nova_quotas.delete.called) - self.assertFalse(mock_neutron_quotas.delete.called) - - @ddt.data( - {"quotas_ctxt": {"nova": {"cpu": 1}}, - "quotas_class_path": "nova_quotas.NovaQuotas"}, - {"quotas_ctxt": {"neutron": {"network": 2}}, - "quotas_class_path": "neutron_quotas.NeutronQuotas"}, - {"quotas_ctxt": {"cinder": {"volumes": 3}}, - "quotas_class_path": "cinder_quotas.CinderQuotas"}, - {"quotas_ctxt": {"manila": {"shares": 4}}, - "quotas_class_path": "manila_quotas.ManilaQuotas"}, - {"quotas_ctxt": {"designate": {"domains": 5}}, - "quotas_class_path": "designate_quotas.DesignateQuotas"}, - ) - @ddt.unpack - def test_exception_during_cleanup(self, quotas_ctxt, quotas_class_path): - quotas_path = "%s.%s" % (QUOTAS_PATH, quotas_class_path) - with mock.patch(quotas_path) as mock_quotas: - mock_quotas.return_value.update.side_effect = Exception - - ctx = copy.deepcopy(self.context) - ctx["config"]["quotas"] = quotas_ctxt - - quotas_instance = quotas.Quotas(ctx) - quotas_instance.original_quotas = [] - for service in quotas_ctxt: - for tenant in self.context["tenants"]: - quotas_instance.original_quotas.append( - (service, tenant, quotas_ctxt[service])) - # NOTE(boris-42): ensure that cleanup didn't raise exceptions. - with logging.LogCatcher(quotas.LOG) as log: - quotas_instance.cleanup() - - log.assertInLogs("Failed to restore quotas for tenant") - - self.assertEqual(mock_quotas.return_value.update.call_count, - len(self.context["tenants"])) diff --git a/tests/unit/plugins/openstack/context/sahara/__init__.py b/tests/unit/plugins/openstack/context/sahara/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/context/sahara/test_sahara_cluster.py b/tests/unit/plugins/openstack/context/sahara/test_sahara_cluster.py deleted file mode 100644 index 3ed20e490c..0000000000 --- a/tests/unit/plugins/openstack/context/sahara/test_sahara_cluster.py +++ /dev/null @@ -1,148 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from rally.common import cfg -from rally import exceptions -from rally.plugins.openstack.context.sahara import sahara_cluster -from rally.plugins.openstack.scenarios.sahara import utils as sahara_utils -from tests.unit import test - -CONF = cfg.CONF - -CTX = "rally.plugins.openstack.context.sahara" - - -class SaharaClusterTestCase(test.ScenarioTestCase): - - patch_task_utils = False - - def setUp(self): - super(SaharaClusterTestCase, self).setUp() - self.tenants_num = 2 - self.users_per_tenant = 2 - self.users = self.tenants_num * self.users_per_tenant - - self.tenants = {} - self.users_key = [] - - for i in range(self.tenants_num): - self.tenants[str(i)] = {"id": str(i), "name": str(i), - "sahara": {"image": "42"}} - for j in range(self.users_per_tenant): - self.users_key.append({"id": "%s_%s" % (str(i), str(j)), - "tenant_id": str(i), - "credential": mock.MagicMock()}) - - CONF.set_override("sahara_cluster_check_interval", 0, "openstack") - - self.context.update({ - "config": { - "users": { - "tenants": self.tenants_num, - "users_per_tenant": self.users_per_tenant - }, - "sahara_cluster": { - "master_flavor_id": "test_flavor_m", - "worker_flavor_id": "test_flavor_w", - "workers_count": 2, - "plugin_name": "test_plugin", - "hadoop_version": "test_version" - } - }, - "admin": {"credential": mock.MagicMock()}, - "users": self.users_key, - "tenants": self.tenants - }) - - @mock.patch("%s.sahara_cluster.resource_manager.cleanup" % CTX) - @mock.patch("%s.sahara_cluster.utils.SaharaScenario._launch_cluster" % CTX, - return_value=mock.MagicMock(id=42)) - def test_setup_and_cleanup(self, mock_sahara_scenario__launch_cluster, - mock_cleanup): - sahara_ctx = sahara_cluster.SaharaCluster(self.context) - - launch_cluster_calls = [] - - for i in self.tenants: - launch_cluster_calls.append(mock.call( - flavor_id=None, - plugin_name="test_plugin", - hadoop_version="test_version", - master_flavor_id="test_flavor_m", - worker_flavor_id="test_flavor_w", - workers_count=2, - image_id=self.context["tenants"][i]["sahara"]["image"], - floating_ip_pool=None, - volumes_per_node=None, - volumes_size=1, - auto_security_group=True, - security_groups=None, - node_configs=None, - cluster_configs=None, - enable_anti_affinity=False, - enable_proxy=False, - wait_active=False, - use_autoconfig=True - )) - - self.clients("sahara").clusters.get.side_effect = [ - mock.MagicMock(status="not-active"), - mock.MagicMock(status="active")] - sahara_ctx.setup() - - mock_sahara_scenario__launch_cluster.assert_has_calls( - launch_cluster_calls) - sahara_ctx.cleanup() - mock_cleanup.assert_called_once_with( - names=["sahara.clusters"], - users=self.context["users"], - superclass=sahara_utils.SaharaScenario, - task_id=self.context["owner_id"]) - - @mock.patch("%s.sahara_cluster.utils.SaharaScenario._launch_cluster" % CTX, - return_value=mock.MagicMock(id=42)) - def test_setup_and_cleanup_error(self, - mock_sahara_scenario__launch_cluster): - sahara_ctx = sahara_cluster.SaharaCluster(self.context) - - launch_cluster_calls = [] - - for i in self.tenants: - launch_cluster_calls.append(mock.call( - flavor_id=None, - plugin_name="test_plugin", - hadoop_version="test_version", - master_flavor_id="test_flavor_m", - worker_flavor_id="test_flavor_w", - workers_count=2, - image_id=self.context["tenants"][i]["sahara"]["image"], - floating_ip_pool=None, - volumes_per_node=None, - volumes_size=1, - auto_security_groups=True, - security_groups=None, - node_configs=None, - cluster_configs=None, - wait_active=False, - 
use_autoconfig=True - )) - - self.clients("sahara").clusters.get.side_effect = [ - mock.MagicMock(status="not-active"), - mock.MagicMock(status="error") - ] - - self.assertRaises(exceptions.ContextSetupFailure, sahara_ctx.setup) diff --git a/tests/unit/plugins/openstack/context/sahara/test_sahara_image.py b/tests/unit/plugins/openstack/context/sahara/test_sahara_image.py deleted file mode 100644 index df512af8fe..0000000000 --- a/tests/unit/plugins/openstack/context/sahara/test_sahara_image.py +++ /dev/null @@ -1,184 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally import exceptions -from rally.plugins.openstack.context.sahara import sahara_image -from tests.unit import test - - -BASE_CTX = "rally.task.context" -CTX = "rally.plugins.openstack.context.sahara.sahara_image" -BASE_SCN = "rally.task.scenarios" -SCN = "rally.plugins.openstack.scenarios" - - -class SaharaImageTestCase(test.ScenarioTestCase): - - def setUp(self): - super(SaharaImageTestCase, self).setUp() - self.tenants_num = 2 - self.users_per_tenant = 2 - self.users = self.tenants_num * self.users_per_tenant - self.task = mock.MagicMock() - - self.tenants = {} - self.users_key = [] - - for i in range(self.tenants_num): - self.tenants[str(i)] = {"id": str(i), "name": str(i), - "sahara": {"image": "42"}} - for j in range(self.users_per_tenant): - self.users_key.append({"id": "%s_%s" % (str(i), str(j)), - "tenant_id": str(i), - "credential": mock.MagicMock()}) - - @property - def url_image_context(self): - self.context.update({ - "config": { - "users": { - "tenants": self.tenants_num, - "users_per_tenant": self.users_per_tenant, - }, - "sahara_image": { - "image_url": "http://somewhere", - "plugin_name": "test_plugin", - "hadoop_version": "test_version", - "username": "test_user" - } - }, - "admin": {"credential": mock.MagicMock()}, - "users": self.users_key, - "tenants": self.tenants - }) - return self.context - - @property - def existing_image_context(self): - self.context.update({ - "config": { - "users": { - "tenants": self.tenants_num, - "users_per_tenant": self.users_per_tenant, - }, - "sahara_image": { - "image_uuid": "some_id" - } - }, - "admin": {"credential": mock.MagicMock()}, - "users": self.users_key, - "tenants": self.tenants, - }) - return self.context - - @mock.patch("rally.plugins.openstack.services." 
- "image.image.Image") - @mock.patch("%s.resource_manager.cleanup" % CTX) - @mock.patch("rally.plugins.openstack.osclients.Clients") - def test_setup_and_cleanup_url_image(self, mock_clients, - mock_cleanup, mock_image): - - ctx = self.url_image_context - sahara_ctx = sahara_image.SaharaImage(ctx) - sahara_ctx.generate_random_name = mock.Mock() - image_service = mock.Mock() - mock_image.return_value = image_service - image_service.create_image.return_value = mock.Mock(id=42) - clients = mock.Mock() - mock_clients.return_value = clients - sahara_client = mock.Mock() - clients.sahara.return_value = sahara_client - - glance_calls = [] - - for i in range(self.tenants_num): - glance_calls.append( - mock.call(container_format="bare", - image_location="http://somewhere", - disk_format="qcow2")) - - sahara_update_image_calls = [] - sahara_update_tags_calls = [] - - for i in range(self.tenants_num): - sahara_update_image_calls.append(mock.call(image_id=42, - user_name="test_user", - desc="")) - sahara_update_tags_calls.append(mock.call( - image_id=42, - new_tags=["test_plugin", "test_version"])) - - sahara_ctx.setup() - image_service.create_image.assert_has_calls(glance_calls) - sahara_client.images.update_image.assert_has_calls( - sahara_update_image_calls) - sahara_client.images.update_tags.assert_has_calls( - sahara_update_tags_calls) - - sahara_ctx.cleanup() - mock_cleanup.assert_called_once_with( - names=["glance.images"], - users=ctx["users"], - superclass=sahara_ctx.__class__, - task_id=ctx["owner_id"]) - - @mock.patch("%s.glance.utils.GlanceScenario._create_image" % SCN, - return_value=mock.MagicMock(id=42)) - @mock.patch("%s.resource_manager.cleanup" % CTX) - @mock.patch("%s.osclients.Clients" % CTX) - def test_setup_and_cleanup_existing_image( - self, mock_clients, mock_cleanup, - mock_glance_scenario__create_image): - - mock_clients.glance.images.get.return_value = mock.MagicMock( - is_public=True) - - ctx = self.existing_image_context - sahara_ctx = sahara_image.SaharaImage(ctx) - - sahara_ctx.setup() - for tenant_id in sahara_ctx.context["tenants"]: - image_id = ( - sahara_ctx.context["tenants"][tenant_id]["sahara"]["image"]) - self.assertEqual("some_id", image_id) - - self.assertFalse(mock_glance_scenario__create_image.called) - - sahara_ctx.cleanup() - self.assertFalse(mock_cleanup.called) - - @mock.patch("%s.osclients.Glance.create_client" % CTX) - def test_check_existing_image(self, mock_glance_create_client): - - ctx = self.existing_image_context - sahara_ctx = sahara_image.SaharaImage(ctx) - sahara_ctx.setup() - - mock_glance_create_client.images.get.asser_called_once_with("some_id") - - @mock.patch("%s.osclients.Glance.create_client" % CTX) - def test_check_existing_private_image_fail(self, - mock_glance_create_client): - - mock_glance_create_client.return_value.images.get.return_value = ( - mock.MagicMock(is_public=False)) - - ctx = self.existing_image_context - sahara_ctx = sahara_image.SaharaImage(ctx) - self.assertRaises(exceptions.ContextSetupFailure, - sahara_ctx.setup) - - mock_glance_create_client.images.get.asser_called_once_with("some_id") diff --git a/tests/unit/plugins/openstack/context/sahara/test_sahara_input_data_sources.py b/tests/unit/plugins/openstack/context/sahara/test_sahara_input_data_sources.py deleted file mode 100644 index 9bad87064b..0000000000 --- a/tests/unit/plugins/openstack/context/sahara/test_sahara_input_data_sources.py +++ /dev/null @@ -1,173 +0,0 @@ -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.plugins.openstack.context.sahara import sahara_input_data_sources -from rally.plugins.openstack.scenarios.swift import utils as swift_utils -from tests.unit import test - -CTX = "rally.plugins.openstack.context.sahara" - - -class SaharaInputDataSourcesTestCase(test.ScenarioTestCase): - - def setUp(self): - super(SaharaInputDataSourcesTestCase, self).setUp() - self.tenants_num = 2 - self.users_per_tenant = 2 - self.task = mock.MagicMock() - self.tenants = {} - self.users = [] - - for i in range(self.tenants_num): - tenant_id = "tenant_%d" % i - self.tenants[tenant_id] = {"id": tenant_id, - "name": tenant_id + "_name", - "sahara": {"image": "foo_image"}} - for u in range(self.users_per_tenant): - user_id = "%s_user_%d" % (tenant_id, u) - self.users.append( - {"id": user_id, - "tenant_id": tenant_id, - "credential": mock.Mock(auth_url="foo_url", - username=user_id + "_name", - password="foo_password")}) - - self.context.update({ - "config": { - "users": { - "tenants": self.tenants_num, - "users_per_tenant": self.users_per_tenant, - }, - "sahara_input_data_sources": { - "input_type": "hdfs", - "input_url": "hdfs://test_host/", - }, - }, - "admin": {"credential": mock.MagicMock()}, - "users": self.users, - "tenants": self.tenants - }) - - @mock.patch("%s.sahara_input_data_sources.resource_manager.cleanup" % CTX) - @mock.patch("%s.sahara_input_data_sources.osclients" % CTX) - def test_setup_and_cleanup(self, mock_osclients, mock_cleanup): - - mock_sahara = mock_osclients.Clients.return_value.sahara.return_value - mock_sahara.data_sources.create.return_value = mock.MagicMock(id=42) - - sahara_ctx = sahara_input_data_sources.SaharaInputDataSources( - self.context) - sahara_ctx.generate_random_name = mock.Mock() - - input_ds_crete_calls = [] - - for i in range(self.tenants_num): - input_ds_crete_calls.append(mock.call( - name=sahara_ctx.generate_random_name.return_value, - description="", - data_source_type="hdfs", - url="hdfs://test_host/")) - - sahara_ctx.setup() - - mock_sahara.data_sources.create.assert_has_calls( - input_ds_crete_calls) - - sahara_ctx.cleanup() - - mock_cleanup.assert_has_calls(( - mock.call(names=["swift.object", "swift.container"], - users=self.context["users"], - superclass=swift_utils.SwiftScenario, - task_id=self.context["owner_id"]), - mock.call( - names=["sahara.data_sources"], - users=self.context["users"], - superclass=sahara_input_data_sources.SaharaInputDataSources, - task_id=self.context["owner_id"]))) - - @mock.patch("requests.get") - @mock.patch("%s.sahara_input_data_sources.osclients" % CTX) - @mock.patch("%s.sahara_input_data_sources.resource_manager" % CTX) - @mock.patch("%s.sahara_input_data_sources.swift_utils" % CTX) - def test_setup_inputs_swift(self, mock_swift_utils, mock_resource_manager, - mock_osclients, mock_get): - mock_swift_scenario = mock.Mock() - mock_swift_scenario._create_container.side_effect = ( - lambda container_name: "container_%s" % container_name) - 
mock_swift_scenario._upload_object.side_effect = iter( - ["uploaded_%d" % i for i in range(10)]) - mock_swift_utils.SwiftScenario.return_value = mock_swift_scenario - - self.context.update({ - "config": { - "users": { - "tenants": self.tenants_num, - "users_per_tenant": self.users_per_tenant, - }, - "sahara_input_data_sources": { - "input_type": "swift", - "input_url": "swift://rally.sahara/input_url", - "swift_files": [{ - "name": "first", - "download_url": "http://host"}] - }, - }, - "admin": {"credential": mock.MagicMock()}, - "task": mock.MagicMock(), - "users": self.users, - "tenants": self.tenants - }) - sahara_ctx = sahara_input_data_sources.SaharaInputDataSources( - self.context) - sahara_ctx.generate_random_name = mock.Mock( - side_effect=iter(["random_name_%d" % i for i in range(10)])) - - input_ds_create_calls = [] - - for i in range(self.tenants_num): - input_ds_create_calls.append(mock.call( - name="random_name_%d" % i, - description="", - data_source_type="swift", - url="swift://rally.sahara/input_url", - credential_user="tenant_%d_user_0_name" % i, - credential_pass="foo_password" - )) - - sahara_ctx.setup() - - self.assertEqual( - input_ds_create_calls, - (mock_osclients.Clients.return_value.sahara.return_value - .data_sources.create.mock_calls)) - - self.assertEqual({"container_name": "container_rally_rally", - "swift_objects": ["uploaded_0", "uploaded_1"]}, - self.context["sahara"]) - - sahara_ctx.cleanup() - - mock_resource_manager.cleanup.assert_has_calls(( - mock.call(names=["swift.object", "swift.container"], - users=self.context["users"], - superclass=mock_swift_utils.SwiftScenario, - task_id=self.context["owner_id"]), - mock.call( - names=["sahara.data_sources"], - users=self.context["users"], - superclass=sahara_input_data_sources.SaharaInputDataSources, - task_id=self.context["owner_id"]))) diff --git a/tests/unit/plugins/openstack/context/sahara/test_sahara_job_binaries.py b/tests/unit/plugins/openstack/context/sahara/test_sahara_job_binaries.py deleted file mode 100644 index 5e38c70e41..0000000000 --- a/tests/unit/plugins/openstack/context/sahara/test_sahara_job_binaries.py +++ /dev/null @@ -1,144 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from rally.plugins.openstack.context.sahara import sahara_job_binaries -from rally.plugins.openstack.scenarios.sahara import utils as sahara_utils -from tests.unit import test - -CTX = "rally.plugins.openstack.context.sahara" - - -class SaharaJobBinariesTestCase(test.ScenarioTestCase): - - def setUp(self): - super(SaharaJobBinariesTestCase, self).setUp() - self.tenants_num = 2 - self.users_per_tenant = 2 - self.users = self.tenants_num * self.users_per_tenant - self.task = mock.MagicMock() - - self.tenants = {} - self.users_key = [] - - for i in range(self.tenants_num): - self.tenants[str(i)] = {"id": str(i), "name": str(i), - "sahara": {"image": "42"}} - for j in range(self.users_per_tenant): - self.users_key.append({"id": "%s_%s" % (str(i), str(j)), - "tenant_id": str(i), - "credential": "credential"}) - - self.user_key = [{"id": i, "tenant_id": j, "credential": "credential"} - for j in range(self.tenants_num) - for i in range(self.users_per_tenant)] - - self.context.update({ - "config": { - "users": { - "tenants": self.tenants_num, - "users_per_tenant": self.users_per_tenant, - }, - "sahara_job_binaries": { - "libs": [ - { - "name": "test.jar", - "download_url": "http://example.com/test.jar" - } - ], - "mains": [ - { - "name": "test.jar", - "download_url": "http://example.com/test.jar" - } - ] - }, - }, - "admin": {"credential": mock.MagicMock()}, - "task": mock.MagicMock(), - "users": self.users_key, - "tenants": self.tenants - }) - - @mock.patch("%s.sahara_job_binaries.resource_manager.cleanup" % CTX) - @mock.patch(("%s.sahara_job_binaries.SaharaJobBinaries." - "download_and_save_lib") % CTX) - @mock.patch("%s.sahara_job_binaries.osclients" % CTX) - def test_setup_and_cleanup( - self, - mock_osclients, - mock_sahara_job_binaries_download_and_save_lib, - mock_cleanup): - - mock_sahara = mock_osclients.Clients(mock.MagicMock()).sahara() - - sahara_ctx = sahara_job_binaries.SaharaJobBinaries(self.context) - - download_calls = [] - - for i in range(self.tenants_num): - download_calls.append(mock.call( - sahara=mock_sahara, - lib_type="mains", - name="test.jar", - download_url="http://example.com/test.jar", - tenant_id=str(i))) - download_calls.append(mock.call( - sahara=mock_sahara, - lib_type="libs", - name="test.jar", - download_url="http://example.com/test.jar", - tenant_id=str(i))) - - sahara_ctx.setup() - - (mock_sahara_job_binaries_download_and_save_lib. 
- assert_has_calls(download_calls)) - - sahara_ctx.cleanup() - - mock_cleanup.assert_called_once_with( - names=["sahara.job_binary_internals", "sahara.job_binaries"], - users=self.context["users"], - superclass=sahara_utils.SaharaScenario, - task_id=self.context["task"]["uuid"]) - - @mock.patch("%s.sahara_job_binaries.requests" % CTX) - @mock.patch("%s.sahara_job_binaries.osclients" % CTX) - def test_download_and_save_lib(self, mock_osclients, mock_requests): - - mock_requests.get.content.return_value = "some_binary_content" - mock_sahara = mock_osclients.Clients(mock.MagicMock()).sahara() - mock_sahara.job_binary_internals.create.return_value = ( - mock.MagicMock(id=42)) - - sahara_ctx = sahara_job_binaries.SaharaJobBinaries(self.context) - - sahara_ctx.context["tenants"]["0"]["sahara"] = {"mains": []} - sahara_ctx.context["tenants"]["0"]["sahara"]["libs"] = [] - - sahara_ctx.download_and_save_lib(sahara=mock_sahara, - lib_type="mains", - name="test_binary", - download_url="http://somewhere", - tenant_id="0") - - sahara_ctx.download_and_save_lib(sahara=mock_sahara, - lib_type="libs", - name="test_binary_2", - download_url="http://somewhere", - tenant_id="0") - - mock_requests.get.assert_called_once_with("http://somewhere") diff --git a/tests/unit/plugins/openstack/context/sahara/test_sahara_output_data_sources.py b/tests/unit/plugins/openstack/context/sahara/test_sahara_output_data_sources.py deleted file mode 100644 index e48390001f..0000000000 --- a/tests/unit/plugins/openstack/context/sahara/test_sahara_output_data_sources.py +++ /dev/null @@ -1,154 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from rally.plugins.openstack.context.sahara import sahara_output_data_sources -from rally.plugins.openstack import credential as oscredential -from tests.unit import test - -CTX = "rally.plugins.openstack.context.sahara" - - -class SaharaOutputDataSourcesTestCase(test.ScenarioTestCase): - - def setUp(self): - super(SaharaOutputDataSourcesTestCase, self).setUp() - fake_dict = oscredential.OpenStackCredential( - "http://fake.example.org:5000/v2.0/", "user", "passwd") - self.tenants_num = 2 - self.users_per_tenant = 2 - self.users = self.tenants_num * self.users_per_tenant - self.task = mock.MagicMock() - - self.tenants = {} - self.users_key = [] - - for i in range(self.tenants_num): - self.tenants[str(i)] = {"id": str(i), "name": str(i), - "sahara": {"image": "42"}} - for j in range(self.users_per_tenant): - self.users_key.append({"id": "%s_%s" % (str(i), str(j)), - "tenant_id": str(i), - "credential": fake_dict}) - - self.user_key = [{"id": i, "tenant_id": j, "credential": "credential"} - for j in range(self.tenants_num) - for i in range(self.users_per_tenant)] - self.context.update({ - "config": { - "users": { - "tenants": self.tenants_num, - "users_per_tenant": self.users_per_tenant, - }, - "sahara_output_data_sources": { - "output_type": "hdfs", - "output_url_prefix": "hdfs://test_host/", - }, - }, - "admin": {"credential": mock.MagicMock()}, - "task": mock.MagicMock(), - "users": self.users_key, - "tenants": self.tenants - }) - - def check_setup(self): - context = sahara_output_data_sources.SaharaOutputDataSources.context[ - "sahara"]["output_conf"] - self.assertIsNotNone(context.get("output_type")) - self.assertIsNotNone(context.get("output_url_prefix")) - - @mock.patch("%s.sahara_output_data_sources.resource_manager.cleanup" % CTX) - @mock.patch("%s.sahara_output_data_sources.osclients" % CTX) - def test_setup_and_cleanup_hdfs(self, mock_osclients, mock_cleanup): - - mock_sahara = mock_osclients.Clients(mock.MagicMock()).sahara() - mock_sahara.data_sources.create.return_value = mock.MagicMock( - id=42) - - sahara_ctx = sahara_output_data_sources.SaharaOutputDataSources( - self.context) - sahara_ctx.generate_random_name = mock.Mock() - - output_ds_crete_calls = [] - - for i in range(self.tenants_num): - output_ds_crete_calls.append(mock.call( - name=sahara_ctx.generate_random_name.return_value, - description="", - data_source_type="hdfs", - url="hdfs://test_host/")) - - sahara_ctx.setup() - - mock_sahara.data_sources.create.assert_has_calls( - output_ds_crete_calls) - - sahara_ctx.cleanup() - - mock_cleanup.assert_has_calls(( - mock.call( - names=["swift.object", "swift.container"], - users=self.context["users"], - superclass=sahara_output_data_sources.SaharaOutputDataSources, - task_id=self.context["owner_id"]), - mock.call( - names=["sahara.data_sources"], - users=self.context["users"], - superclass=sahara_output_data_sources.SaharaOutputDataSources, - task_id=self.context["owner_id"]))) - - @mock.patch("%s.sahara_output_data_sources.osclients" % CTX) - def test_setup_inputs_swift(self, mock_osclients): - mock_sahara = mock_osclients.Clients(mock.MagicMock()).sahara() - - self.context.update({ - "config": { - "users": { - "tenants": self.tenants_num, - "users_per_tenant": self.users_per_tenant, - }, - "sahara_output_data_sources": { - "output_type": "swift", - "output_url_prefix": "rally", - }, - }, - "admin": {"credential": mock.MagicMock()}, - "task": mock.MagicMock(), - "users": self.users_key, - "tenants": self.tenants, - "user_choice_method": "random", - }) - - 
sahara_ctx = sahara_output_data_sources.SaharaOutputDataSources( - self.context) - sahara_ctx.generate_random_name = mock.Mock(return_value="random_name") - - output_ds_crete_calls = [] - for i in range(self.tenants_num): - output_ds_crete_calls.append(mock.call( - name="random_name", - description="", - data_source_type="swift", - url="swift://random_name.sahara/", - credential_user="user", - credential_pass="passwd" - )) - - sahara_ctx.setup() - - mock_sahara.data_sources.create.assert_has_calls( - output_ds_crete_calls) - - sahara_ctx.cleanup() diff --git a/tests/unit/plugins/openstack/context/senlin/__init__.py b/tests/unit/plugins/openstack/context/senlin/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/context/senlin/test_profiles.py b/tests/unit/plugins/openstack/context/senlin/test_profiles.py deleted file mode 100644 index 4cbb7da8f1..0000000000 --- a/tests/unit/plugins/openstack/context/senlin/test_profiles.py +++ /dev/null @@ -1,84 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.plugins.openstack.context.senlin import profiles -from tests.unit import test - - -BASE_CTX = "rally.task.context" -CTX = "rally.plugins.openstack.context" -BASE_SCN = "rally.task.scenarios" -SCN = "rally.plugins.openstack.scenarios" - - -class ProfilesGeneratorTestCase(test.ScenarioTestCase): - """Generate tenants.""" - def _gen_tenants(self, count): - tenants = {} - for _id in range(count): - tenants[str(_id)] = {"id": str(_id)} - return tenants - - def setUp(self): - super(ProfilesGeneratorTestCase, self).setUp() - self.tenants_count = 2 - self.users_per_tenant = 3 - tenants = self._gen_tenants(self.tenants_count) - users = [] - for tenant in tenants: - for i in range(self.users_per_tenant): - users.append({"id": i, "tenant_id": tenant, - "credential": mock.MagicMock()}) - - self.context = { - "config": { - "users": { - "tenants": self.tenants_count, - "users_per_tenant": self.users_per_tenant - }, - "profiles": { - "type": "profile_type_name", - "version": "1.0", - "properties": {"k1": "v1", "k2": "v2"} - }, - }, - "users": users, - "tenants": tenants, - "task": mock.MagicMock() - } - - @mock.patch("%s.senlin.utils.SenlinScenario._create_profile" % SCN, - return_value=mock.MagicMock(id="TEST_PROFILE_ID")) - def test_setup(self, mock_senlin_scenario__create_profile): - profile_ctx = profiles.ProfilesGenerator(self.context) - profile_ctx.setup() - spec = self.context["config"]["profiles"] - - mock_calls = [mock.call(spec) for i in range(self.tenants_count)] - mock_senlin_scenario__create_profile.assert_has_calls(mock_calls) - - for tenant in self.context["tenants"]: - self.assertEqual("TEST_PROFILE_ID", - self.context["tenants"][tenant]["profile"]) - - @mock.patch("%s.senlin.utils.SenlinScenario._delete_profile" % SCN) - def test_cleanup(self, mock_senlin_scenario__delete_profile): - for tenant in self.context["tenants"]: - self.context["tenants"][tenant].update( - {"profile": "TEST_PROFILE_ID"}) - profile_ctx = 
profiles.ProfilesGenerator(self.context) - profile_ctx.cleanup() - mock_calls = [mock.call("TEST_PROFILE_ID") for i in range( - self.tenants_count)] - mock_senlin_scenario__delete_profile.assert_has_calls(mock_calls) diff --git a/tests/unit/plugins/openstack/context/swift/__init__.py b/tests/unit/plugins/openstack/context/swift/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/context/swift/test_objects.py b/tests/unit/plugins/openstack/context/swift/test_objects.py deleted file mode 100644 index f1b48971f5..0000000000 --- a/tests/unit/plugins/openstack/context/swift/test_objects.py +++ /dev/null @@ -1,225 +0,0 @@ -# Copyright 2015: Cisco Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally import exceptions -from rally.plugins.openstack.context.swift import objects -from tests.unit import test - - -class SwiftObjectGeneratorTestCase(test.TestCase): - - @mock.patch("rally.plugins.openstack.osclients.Clients") - def test_setup(self, mock_clients): - containers_per_tenant = 2 - objects_per_container = 7 - context = test.get_test_context() - context.update({ - "config": { - "swift_objects": { - "containers_per_tenant": containers_per_tenant, - "objects_per_container": objects_per_container, - "object_size": 1024, - "resource_management_workers": 10 - } - }, - "tenants": { - "t1": {"name": "t1_name"}, - "t2": {"name": "t2_name"} - }, - "users": [ - { - "id": "u1", - "tenant_id": "t1", - "credential": mock.MagicMock() - }, - { - "id": "u2", - "tenant_id": "t2", - "credential": mock.MagicMock() - } - ] - }) - - objects_ctx = objects.SwiftObjectGenerator(context) - objects_ctx.setup() - - for tenant_id in context["tenants"]: - containers = context["tenants"][tenant_id]["containers"] - self.assertEqual(containers_per_tenant, len(containers)) - for container in containers: - self.assertEqual(objects_per_container, - len(container["objects"])) - - @mock.patch("rally.plugins.openstack.osclients.Clients") - @mock.patch("rally.plugins.openstack.context.swift.utils." 
- "swift_utils.SwiftScenario") - def test_cleanup(self, mock_swift_scenario, mock_clients): - context = test.get_test_context() - context.update({ - "config": { - "swift_objects": { - "resource_management_workers": 1 - } - }, - "tenants": { - "t1": { - "name": "t1_name", - "containers": [ - {"user": {"id": "u1", "tenant_id": "t1", - "credential": "c1"}, - "container": "c1", - "objects": ["o1", "o2", "o3"]} - ] - }, - "t2": { - "name": "t2_name", - "containers": [ - {"user": {"id": "u2", "tenant_id": "t2", - "credential": "c2"}, - "container": "c2", - "objects": ["o4", "o5", "o6"]} - ] - } - } - }) - - objects_ctx = objects.SwiftObjectGenerator(context) - objects_ctx.cleanup() - - expected_containers = ["c1", "c2"] - mock_swift_scenario.return_value._delete_container.assert_has_calls( - [mock.call(con) for con in expected_containers], any_order=True) - - expected_objects = [("c1", "o1"), ("c1", "o2"), ("c1", "o3"), - ("c2", "o4"), ("c2", "o5"), ("c2", "o6")] - mock_swift_scenario.return_value._delete_object.assert_has_calls( - [mock.call(con, obj) for con, obj in expected_objects], - any_order=True) - - for tenant_id in context["tenants"]: - self.assertEqual(0, - len(context["tenants"][tenant_id]["containers"])) - - @mock.patch("rally.plugins.openstack.osclients.Clients") - def test_setup_failure_clients_put_container(self, mock_clients): - context = test.get_test_context() - context.update({ - "config": { - "swift_objects": { - "containers_per_tenant": 2, - "object_size": 10, - "resource_management_workers": 5 - } - }, - "tenants": { - "t1": {"name": "t1_name"}, - "t2": {"name": "t2_name"} - }, - "users": [ - { - "id": "u1", - "tenant_id": "t1", - "credential": mock.MagicMock() - }, - { - "id": "u2", - "tenant_id": "t2", - "credential": mock.MagicMock() - } - ] - }) - mock_swift = mock_clients.return_value.swift.return_value - mock_swift.put_container.side_effect = [Exception, True, - Exception, Exception] - objects_ctx = objects.SwiftObjectGenerator(context) - self.assertRaisesRegex(exceptions.ContextSetupFailure, - "containers, expected 4 but got 1", - objects_ctx.setup) - - @mock.patch("rally.plugins.openstack.osclients.Clients") - def test_setup_failure_clients_put_object(self, mock_clients): - context = test.get_test_context() - context.update({ - "tenants": { - "t1": {"name": "t1_name"}, - "t2": {"name": "t2_name"} - }, - "users": [ - { - "id": "u1", - "tenant_id": "t1", - "credential": mock.MagicMock() - }, - { - "id": "u2", - "tenant_id": "t2", - "credential": mock.MagicMock() - } - ] - }) - mock_swift = mock_clients.return_value.swift.return_value - mock_swift.put_object.side_effect = [Exception, True] - objects_ctx = objects.SwiftObjectGenerator(context) - self.assertRaisesRegex(exceptions.ContextSetupFailure, - "objects, expected 2 but got 1", - objects_ctx.setup) - - @mock.patch("rally.plugins.openstack.osclients.Clients") - def test_cleanup_failure_clients_delete_container(self, mock_clients): - context = test.get_test_context() - context.update({ - "tenants": { - "t1": { - "name": "t1_name", - "containers": [ - {"user": {"id": "u1", "tenant_id": "t1", - "credential": mock.MagicMock()}, - "container": "coooon", - "objects": []}] * 3 - } - } - }) - mock_swift = mock_clients.return_value.swift.return_value - mock_swift.delete_container.side_effect = [True, True, Exception] - objects_ctx = objects.SwiftObjectGenerator(context) - objects_ctx.cleanup() - self.assertEqual(1, len(context["tenants"]["t1"]["containers"])) - - @mock.patch("rally.plugins.openstack.osclients.Clients") - 
def test_cleanup_failure_clients_delete_object(self, mock_clients): - context = test.get_test_context() - context.update({ - "tenants": { - "t1": { - "name": "t1_name", - "containers": [ - {"user": {"id": "u1", "tenant_id": "t1", - "credential": mock.MagicMock()}, - "container": "c1", - "objects": ["oooo"] * 3} - ] - } - } - }) - mock_swift = mock_clients.return_value.swift.return_value - mock_swift.delete_object.side_effect = [True, Exception, True] - objects_ctx = objects.SwiftObjectGenerator(context) - objects_ctx._delete_containers = mock.MagicMock() - objects_ctx.cleanup() - self.assertEqual( - 1, sum([len(container["objects"]) - for container in context["tenants"]["t1"]["containers"]])) diff --git a/tests/unit/plugins/openstack/context/swift/test_utils.py b/tests/unit/plugins/openstack/context/swift/test_utils.py deleted file mode 100644 index 491435241e..0000000000 --- a/tests/unit/plugins/openstack/context/swift/test_utils.py +++ /dev/null @@ -1,188 +0,0 @@ -# Copyright 2015: Cisco Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.plugins.openstack.context.swift import utils -from tests.unit import test - - -class SwiftObjectMixinTestCase(test.TestCase): - - @mock.patch("rally.plugins.openstack.osclients.Clients") - def test__create_containers(self, mock_clients): - tenants = 2 - containers_per_tenant = 2 - context = test.get_test_context() - c = [mock.MagicMock(), mock.MagicMock()] - context.update({ - "tenants": { - "1001": {"name": "t1_name"}, - "1002": {"name": "t2_name"} - }, - "users": [ - {"id": "u1", "tenant_id": "1001", "credential": c[0]}, - {"id": "u2", "tenant_id": "1002", "credential": c[1]} - ] - }) - - mixin = utils.SwiftObjectMixin() - containers = mixin._create_containers(context, containers_per_tenant, - 15) - - self.assertEqual(tenants * containers_per_tenant, len(containers)) - for index, container in enumerate(sorted(containers)): - offset = int(index / containers_per_tenant) + 1 - self.assertEqual(str(1000 + offset), container[0]) - - for index, tenant_id in enumerate(sorted(context["tenants"]), start=1): - containers = context["tenants"][tenant_id]["containers"] - self.assertEqual(containers_per_tenant, len(containers)) - for container in containers: - self.assertEqual("u%d" % index, container["user"]["id"]) - self.assertEqual(c[index - 1], - container["user"]["credential"]) - self.assertEqual(0, len(container["objects"])) - - @mock.patch("rally.plugins.openstack.osclients.Clients") - def test__create_objects(self, mock_clients): - tenants = 2 - containers_per_tenant = 1 - objects_per_container = 5 - context = test.get_test_context() - context.update({ - "tenants": { - "1001": { - "name": "t1_name", - "containers": [ - {"user": { - "id": "u1", "tenant_id": "1001", - "credential": mock.MagicMock()}, - "container": "c1", - "objects": []} - ] - }, - "1002": { - "name": "t2_name", - "containers": [ - {"user": { - "id": "u2", "tenant_id": "1002", - "credential": mock.MagicMock()}, - "container": "c2", - 
"objects": []} - ] - } - } - }) - - mixin = utils.SwiftObjectMixin() - objects_list = mixin._create_objects(context, objects_per_container, - 1024, 25) - - self.assertEqual( - tenants * containers_per_tenant * objects_per_container, - len(objects_list)) - chunk = containers_per_tenant * objects_per_container - for index, obj in enumerate(sorted(objects_list)): - offset = int(index / chunk) + 1 - self.assertEqual(str(1000 + offset), obj[0]) - self.assertEqual("c%d" % offset, obj[1]) - - for tenant_id in context["tenants"]: - for container in context["tenants"][tenant_id]["containers"]: - self.assertEqual(objects_per_container, - len(container["objects"])) - - @mock.patch("rally.plugins.openstack.osclients.Clients") - def test__delete_containers(self, mock_clients): - context = test.get_test_context() - context.update({ - "tenants": { - "1001": { - "name": "t1_name", - "containers": [ - {"user": { - "id": "u1", "tenant_id": "1001", - "credential": mock.MagicMock()}, - "container": "c1", - "objects": []} - ] - }, - "1002": { - "name": "t2_name", - "containers": [ - {"user": { - "id": "u2", "tenant_id": "1002", - "credential": mock.MagicMock()}, - "container": "c2", - "objects": []} - ] - } - } - }) - - mixin = utils.SwiftObjectMixin() - mixin._delete_containers(context, 1) - - mock_swift = mock_clients.return_value.swift.return_value - expected_containers = ["c1", "c2"] - mock_swift.delete_container.assert_has_calls( - [mock.call(con) for con in expected_containers], any_order=True) - - for tenant_id in context["tenants"]: - self.assertEqual(0, - len(context["tenants"][tenant_id]["containers"])) - - @mock.patch("rally.plugins.openstack.osclients.Clients") - def test__delete_objects(self, mock_clients): - context = test.get_test_context() - context.update({ - "tenants": { - "1001": { - "name": "t1_name", - "containers": [ - {"user": { - "id": "u1", "tenant_id": "1001", - "credential": mock.MagicMock()}, - "container": "c1", - "objects": ["o1", "o2", "o3"]} - ] - }, - "1002": { - "name": "t2_name", - "containers": [ - {"user": { - "id": "u2", "tenant_id": "1002", - "credential": mock.MagicMock()}, - "container": "c2", - "objects": ["o4", "o5", "o6"]} - ] - } - } - }) - - mixin = utils.SwiftObjectMixin() - mixin._delete_objects(context, 1) - - mock_swift = mock_clients.return_value.swift.return_value - expected_objects = [("c1", "o1"), ("c1", "o2"), ("c1", "o3"), - ("c2", "o4"), ("c2", "o5"), ("c2", "o6")] - mock_swift.delete_object.assert_has_calls( - [mock.call(con, obj) for con, obj in expected_objects], - any_order=True) - - for tenant_id in context["tenants"]: - for container in context["tenants"][tenant_id]["containers"]: - self.assertEqual(0, len(container["objects"])) diff --git a/tests/unit/plugins/openstack/context/test_api_versions.py b/tests/unit/plugins/openstack/context/test_api_versions.py deleted file mode 100644 index dab771f438..0000000000 --- a/tests/unit/plugins/openstack/context/test_api_versions.py +++ /dev/null @@ -1,100 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ddt -import mock - -from rally.common import utils -from rally import exceptions -from rally.plugins.openstack.context import api_versions -from rally.task import context -from tests.unit import test - - -@ddt.ddt -class OpenStackServicesTestCase(test.TestCase): - - def setUp(self): - super(OpenStackServicesTestCase, self).setUp() - self.mock_clients = mock.patch( - "rally.plugins.openstack.osclients.Clients").start() - osclient_kc = self.mock_clients.return_value.keystone - self.mock_kc = osclient_kc.return_value - self.service_catalog = osclient_kc.service_catalog - self.service_catalog.get_endpoints.return_value = [] - self.mock_kc.services.list.return_value = [] - - @ddt.data(({"nova": {"service_type": "compute", "version": 2}, - "cinder": {"service_name": "cinderv2", "version": 2}, - "neutron": {"service_type": "network"}, - "glance": {"service_name": "glance"}, - "heat": {"version": 1}}, True), - ({"nova": {"service_type": "compute", - "service_name": "nova"}}, False), - ({"keystone": {"service_type": "foo"}}, False), - ({"nova": {"version": "foo"}}, False), - ({}, False)) - @ddt.unpack - def test_validate(self, config, valid): - results = context.Context.validate("api_versions", None, None, config) - if valid: - self.assertEqual([], results) - else: - self.assertGreater(len(results), 0) - - def test_setup_with_wrong_service_name(self): - context_obj = { - "config": {api_versions.OpenStackAPIVersions.get_fullname(): { - "nova": {"service_name": "service_name"}}}, - "admin": {"credential": mock.MagicMock()}, - "users": [{"credential": mock.MagicMock()}]} - ctx = api_versions.OpenStackAPIVersions(context_obj) - self.assertRaises(exceptions.ValidationError, ctx.setup) - self.service_catalog.get_endpoints.assert_called_once_with() - self.mock_kc.services.list.assert_called_once_with() - - def test_setup_with_wrong_service_name_and_without_admin(self): - context_obj = { - "config": {api_versions.OpenStackAPIVersions.get_fullname(): { - "nova": {"service_name": "service_name"}}}, - "users": [{"credential": mock.MagicMock()}]} - ctx = api_versions.OpenStackAPIVersions(context_obj) - self.assertRaises(exceptions.ContextSetupFailure, ctx.setup) - self.service_catalog.get_endpoints.assert_called_once_with() - self.assertFalse(self.mock_kc.services.list.called) - - def test_setup_with_wrong_service_type(self): - context_obj = { - "config": {api_versions.OpenStackAPIVersions.get_fullname(): { - "nova": {"service_type": "service_type"}}}, - "users": [{"credential": mock.MagicMock()}]} - ctx = api_versions.OpenStackAPIVersions(context_obj) - self.assertRaises(exceptions.ValidationError, ctx.setup) - self.service_catalog.get_endpoints.assert_called_once_with() - - def test_setup_with_service_name(self): - self.mock_kc.services.list.return_value = [ - utils.Struct(type="computev21", name="NovaV21")] - name = api_versions.OpenStackAPIVersions.get_fullname() - context = { - "config": {name: {"nova": {"service_name": "NovaV21"}}}, - "admin": {"credential": mock.MagicMock()}, - "users": [{"credential": mock.MagicMock()}]} - ctx = api_versions.OpenStackAPIVersions(context) - ctx.setup() - - self.service_catalog.get_endpoints.assert_called_once_with() - self.mock_kc.services.list.assert_called_once_with() - - self.assertEqual( - "computev21", - ctx.context["config"]["api_versions"]["nova"]["service_type"]) diff --git a/tests/unit/plugins/openstack/context/vm/__init__.py b/tests/unit/plugins/openstack/context/vm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git 
a/tests/unit/plugins/openstack/context/vm/test_custom_image.py b/tests/unit/plugins/openstack/context/vm/test_custom_image.py deleted file mode 100644 index ae9a90c6e9..0000000000 --- a/tests/unit/plugins/openstack/context/vm/test_custom_image.py +++ /dev/null @@ -1,200 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.plugins.openstack.context.vm import custom_image -from rally.task import context -from tests.unit import test - - -BASE = "rally.plugins.openstack.context.vm.custom_image" - - -@context.configure(name="test_custom_image", order=500) -class FakeImageGenerator(custom_image.BaseCustomImageGenerator): - def _customize_image(self, *args): - pass - - -class BaseCustomImageContextVMTestCase(test.TestCase): - - def setUp(self): - super(BaseCustomImageContextVMTestCase, self).setUp() - - self.context = test.get_test_context() - self.context.update({ - "config": { - "test_custom_image": { - "image": {"name": "image"}, - "flavor": {"name": "flavor"}, - "username": "fedora", - "floating_network": "floating", - "port": 1022, - } - }, - "admin": { - "credential": mock.Mock(), - }, - "users": [ - {"tenant_id": "tenant_id0"}, - {"tenant_id": "tenant_id1"}, - {"tenant_id": "tenant_id2"} - ], - "tenants": { - "tenant_id0": {}, - "tenant_id1": {}, - "tenant_id2": {} - } - }) - - @mock.patch("%s.osclients.Clients" % BASE) - @mock.patch("%s.types.GlanceImage" % BASE) - @mock.patch("%s.types.Flavor" % BASE) - @mock.patch("%s.vmtasks.BootRuncommandDelete" % BASE) - def test_create_one_image( - self, mock_boot_runcommand_delete, mock_flavor, - mock_glance_image, mock_clients): - mock_flavor.return_value.pre_process.return_value = "flavor" - mock_glance_image.return_value.pre_process.return_value = "image" - ip = {"ip": "foo_ip", "id": "foo_id", "is_floating": True} - fake_server = mock.Mock() - - fake_image = {"id": "image"} - - scenario = mock_boot_runcommand_delete.return_value = mock.MagicMock( - _create_image=mock.MagicMock(return_value=fake_image), - _boot_server_with_fip=mock.MagicMock( - return_value=(fake_server, ip)) - ) - generator_ctx = FakeImageGenerator(self.context) - generator_ctx._customize_image = mock.MagicMock() - - user = { - "credential": "credential", - "keypair": {"name": "keypair_name"}, - "secgroup": {"name": "secgroup_name"} - } - - custom_image = generator_ctx.create_one_image(user, - foo_arg="foo_value") - self.assertEqual({"id": "image"}, custom_image) - - mock_flavor.assert_called_once_with(self.context) - mock_flavor.return_value.pre_process.assert_called_once_with( - resource_spec={"name": "flavor"}, config={}) - mock_glance_image.assert_called_once_with(self.context) - mock_glance_image.return_value.pre_process.assert_called_once_with( - resource_spec={"name": "image"}, config={}) - mock_boot_runcommand_delete.assert_called_once_with( - self.context, clients=mock_clients.return_value) - - scenario._boot_server_with_fip.assert_called_once_with( - image="image", flavor="flavor", - 
floating_network="floating", - key_name="keypair_name", security_groups=["secgroup_name"], - userdata=None, foo_arg="foo_value") - - scenario._stop_server.assert_called_once_with(fake_server) - - generator_ctx._customize_image.assert_called_once_with( - fake_server, ip, user) - - scenario._create_image.assert_called_once_with(fake_server) - - scenario._delete_server_with_fip.assert_called_once_with( - fake_server, ip) - - @mock.patch("%s.image.Image" % BASE) - def test_delete_one_image(self, mock_image): - generator_ctx = FakeImageGenerator(self.context) - - credential = mock.Mock() - user = {"credential": credential, - "keypair": {"name": "keypair_name"}} - custom_image = mock.Mock(id="image") - - generator_ctx.delete_one_image(user, custom_image) - - mock_image.return_value.delete_image.assert_called_once_with("image") - - @mock.patch("%s.image.Image" % BASE) - def test_setup_admin(self, mock_image): - self.context["tenants"]["tenant_id0"]["networks"] = [ - {"id": "network_id"}] - - generator_ctx = FakeImageGenerator(self.context) - - image = mock.Mock(id="custom_image") - - generator_ctx.create_one_image = mock.Mock(return_value=image) - - generator_ctx.setup() - - mock_image.return_value.set_visibility.assert_called_once_with( - image.id) - - generator_ctx.create_one_image.assert_called_once_with( - self.context["users"][0], nics=[{"net-id": "network_id"}]) - - def test_cleanup_admin(self): - tenant = self.context["tenants"]["tenant_id0"] - custom_image = tenant["custom_image"] = {"id": "image"} - - generator_ctx = FakeImageGenerator(self.context) - - generator_ctx.delete_one_image = mock.Mock() - - generator_ctx.cleanup() - - generator_ctx.delete_one_image.assert_called_once_with( - self.context["users"][0], custom_image) - - def test_setup(self): - self.context.pop("admin") - - generator_ctx = FakeImageGenerator(self.context) - - generator_ctx.create_one_image = mock.Mock( - side_effect=["custom_image0", "custom_image1", "custom_image2"]) - - generator_ctx.setup() - - self.assertEqual( - [mock.call(user) for user in self.context["users"]], - generator_ctx.create_one_image.mock_calls) - - for i in range(3): - self.assertEqual( - "custom_image%d" % i, - self.context["tenants"]["tenant_id%d" % i]["custom_image"] - ) - - def test_cleanup(self): - self.context.pop("admin") - - for i in range(3): - self.context["tenants"]["tenant_id%d" % i]["custom_image"] = { - "id": "custom_image%d" % i} - - generator_ctx = FakeImageGenerator(self.context) - generator_ctx.delete_one_image = mock.Mock() - - generator_ctx.cleanup() - - self.assertEqual( - [mock.call(self.context["users"][i], - {"id": "custom_image%d" % i}) for i in range(3)], - generator_ctx.delete_one_image.mock_calls) diff --git a/tests/unit/plugins/openstack/context/vm/test_image_command_customizer.py b/tests/unit/plugins/openstack/context/vm/test_image_command_customizer.py deleted file mode 100644 index e6c4d2e03c..0000000000 --- a/tests/unit/plugins/openstack/context/vm/test_image_command_customizer.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Tests for the image customizer using a command execution.""" - -import mock - -from rally import exceptions -from rally.plugins.openstack.context.vm import image_command_customizer -from tests.unit import test - -BASE = "rally.plugins.openstack.context.vm.image_command_customizer" - - -class ImageCommandCustomizerContextVMTestCase(test.TestCase): - - def setUp(self): - super(ImageCommandCustomizerContextVMTestCase, self).setUp() - - self.context = { - "task": mock.MagicMock(), - "config": { - "image_command_customizer": { - "image": {"name": "image"}, - "flavor": {"name": "flavor"}, - "username": "fedora", - "password": "foo_password", - "floating_network": "floating", - "port": 1022, - "command": { - "interpreter": "foo_interpreter", - "script_file": "foo_script" - } - } - }, - "admin": { - "credential": "credential", - } - } - - self.user = {"keypair": {"private": "foo_private"}} - self.fip = {"ip": "foo_ip"} - - @mock.patch("%s.vm_utils.VMScenario" % BASE) - def test_customize_image(self, mock_vm_scenario): - mock_vm_scenario.return_value._run_command.return_value = ( - 0, "foo_stdout", "foo_stderr") - - customizer = image_command_customizer.ImageCommandCustomizerContext( - self.context) - - retval = customizer.customize_image(server=None, ip=self.fip, - user=self.user) - - mock_vm_scenario.assert_called_once_with(customizer.context) - mock_vm_scenario.return_value._run_command.assert_called_once_with( - "foo_ip", 1022, "fedora", "foo_password", pkey="foo_private", - command={"interpreter": "foo_interpreter", - "script_file": "foo_script"}) - - self.assertEqual((0, "foo_stdout", "foo_stderr"), retval) - - @mock.patch("%s.vm_utils.VMScenario" % BASE) - def test_customize_image_fail(self, mock_vm_scenario): - mock_vm_scenario.return_value._run_command.return_value = ( - 1, "foo_stdout", "foo_stderr") - - customizer = image_command_customizer.ImageCommandCustomizerContext( - self.context) - - exc = self.assertRaises( - exceptions.ScriptError, customizer.customize_image, - server=None, ip=self.fip, user=self.user) - - str_exc = str(exc) - self.assertIn("foo_stdout", str_exc) - self.assertIn("foo_stderr", str_exc) - - mock_vm_scenario.return_value._run_command.assert_called_once_with( - "foo_ip", 1022, "fedora", "foo_password", pkey="foo_private", - command={"interpreter": "foo_interpreter", - "script_file": "foo_script"}) diff --git a/tests/unit/plugins/openstack/context/watcher/__init__.py b/tests/unit/plugins/openstack/context/watcher/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/context/watcher/test_audit_templates.py b/tests/unit/plugins/openstack/context/watcher/test_audit_templates.py deleted file mode 100644 index 3ab1450e79..0000000000 --- a/tests/unit/plugins/openstack/context/watcher/test_audit_templates.py +++ /dev/null @@ -1,92 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from rally.plugins.openstack.context.watcher import audit_templates -from rally.plugins.openstack.scenarios.watcher import utils as watcher_utils -from tests.unit import test - - -CTX = "rally.plugins.openstack.context.watcher" -SCN = "rally.plugins.openstack.scenarios.watcher" -TYP = "rally.plugins.openstack.types" - - -class AuditTemplateTestCase(test.ScenarioTestCase): - - @mock.patch("%s.utils.WatcherScenario._create_audit_template" % SCN, - return_value=mock.MagicMock()) - @mock.patch("%s.WatcherStrategy" % TYP,) - @mock.patch("%s.WatcherGoal" % TYP) - def test_setup(self, mock_watcher_goal, mock_watcher_strategy, - mock_watcher_scenario__create_audit_template): - - users = [{"id": 1, "tenant_id": 1, "credential": mock.MagicMock()}] - self.context.update({ - "config": { - "audit_templates": { - "audit_templates_per_admin": 1, - "fill_strategy": "random", - "params": [ - { - "goal": { - "name": "workload_balancing" - }, - "strategy": { - "name": "workload_stabilization" - } - }, - { - "goal": { - "name": "workload_balancing" - }, - "strategy": { - "name": "workload_stabilization" - } - } - ] - }, - }, - "admin": { - "credential": mock.MagicMock() - }, - "users": users - }) - audit_template = audit_templates.AuditTemplateGenerator(self.context) - audit_template.setup() - goal_id = mock_watcher_goal.return_value.pre_process.return_value - strategy_id = ( - mock_watcher_strategy.return_value.pre_process.return_value) - mock_calls = [mock.call(goal_id, strategy_id)] - mock_watcher_scenario__create_audit_template.assert_has_calls( - mock_calls) - - @mock.patch("%s.audit_templates.resource_manager.cleanup" % CTX) - def test_cleanup(self, mock_cleanup): - audit_templates_mocks = [mock.Mock() for i in range(2)] - self.context.update({ - "admin": { - "credential": mock.MagicMock() - }, - "audit_templates": audit_templates_mocks - }) - audit_templates_ctx = audit_templates.AuditTemplateGenerator( - self.context) - audit_templates_ctx.cleanup() - mock_cleanup.assert_called_once_with( - names=["watcher.action_plan", "watcher.audit_template"], - admin=self.context["admin"], - superclass=watcher_utils.WatcherScenario, - task_id=self.context["owner_id"]) diff --git a/tests/unit/plugins/openstack/embedcharts/test_osprofilerchart.py b/tests/unit/plugins/openstack/embedcharts/test_osprofilerchart.py deleted file mode 100644 index d2357a4889..0000000000 --- a/tests/unit/plugins/openstack/embedcharts/test_osprofilerchart.py +++ /dev/null @@ -1,56 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -from rally.plugins.openstack.embedcharts.osprofilerchart import OSProfilerChart -from tests.unit import test - - -class OSProfilerChartTestCase(test.TestCase): - - class OSProfilerChart(OSProfilerChart): - widget = "OSProfiler" - - @mock.patch("osprofiler.drivers.base.get_driver") - def test_get_osprofiler_data(self, mock_get_driver): - engine = mock.Mock() - attrs = {"get_report.return_value": "html"} - engine.configure_mock(**attrs) - mock_get_driver.return_value = engine - - data = {"data": {"conn_str": "a", "trace_id": ["1"]}, "title": "a"} - return_data = OSProfilerChart.render_complete_data(data) - self.assertEqual("EmbedChart", return_data["widget"]) - self.assertEqual("a : 1", return_data["title"]) - - data = {"data": {"conn_str": None, "trace_id": ["1"]}, "title": "a"} - return_data = OSProfilerChart.render_complete_data(data) - self.assertEqual("TextArea", return_data["widget"]) - self.assertEqual(["1"], return_data["data"]) - self.assertEqual("a", return_data["title"]) - - mock_get_driver.side_effect = Exception - data = {"data": {"conn_str": "a", "trace_id": ["1"]}, "title": "a"} - return_data = OSProfilerChart.render_complete_data(data) - self.assertEqual("TextArea", return_data["widget"]) - self.assertEqual(["1"], return_data["data"]) - self.assertEqual("a", return_data["title"]) - - def test_datetime_json_serialize(self): - from rally.plugins.openstack.embedcharts.osprofilerchart \ - import _datetime_json_serialize - A = mock.Mock() - B = A.isoformat() - self.assertEqual(B, _datetime_json_serialize(A)) - self.assertEqual("C", _datetime_json_serialize("C")) diff --git a/tests/unit/plugins/openstack/hook/__init__.py b/tests/unit/plugins/openstack/hook/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/hook/test_fault_injection.py b/tests/unit/plugins/openstack/hook/test_fault_injection.py deleted file mode 100644 index a53d7ed63d..0000000000 --- a/tests/unit/plugins/openstack/hook/test_fault_injection.py +++ /dev/null @@ -1,126 +0,0 @@ -# Copyright 2016: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -import ddt -import mock -import os_faults -from os_faults.api import error - -from rally import consts -from rally.plugins.openstack.hook import fault_injection -from rally.task import hook -from tests.unit import fakes -from tests.unit import test - - -@ddt.ddt -class FaultInjectionHookTestCase(test.TestCase): - - def setUp(self): - super(FaultInjectionHookTestCase, self).setUp() - self.task = {"deployment_uuid": "foo_uuid"} - - @ddt.data((dict(action="foo"), True), - (dict(action="foo", verify=True), True), - (dict(action=10), False), - (dict(action="foo", verify=10), False), - (dict(), False)) - @ddt.unpack - def test_config_schema(self, config, valid): - results = hook.HookAction.validate("fault_injection", None, None, - config) - if valid: - self.assertEqual([], results) - else: - self.assertEqual(1, len(results)) - - @mock.patch("rally.common.objects.Deployment.get") - @mock.patch("rally.common.utils.Timer", side_effect=fakes.FakeTimer) - def test_run(self, mock_timer, mock_deployment_get): - hook = fault_injection.FaultInjectionHook( - self.task, {"action": "foo", "verify": True}, - {"iteration": 1}) - - with mock.patch.object(os_faults, "human_api") as mock_human_api: - with mock.patch.object(os_faults, "connect") as mock_connect: - hook.run_sync() - - injector_inst = mock_connect.return_value - - mock_connect.assert_called_once_with(None) - mock_human_api.assert_called_once_with(injector_inst, "foo") - - self.assertEqual( - {"finished_at": fakes.FakeTimer().finish_timestamp(), - "started_at": fakes.FakeTimer().timestamp(), - "status": consts.HookStatus.SUCCESS, - "triggered_by": {"iteration": 1}}, - hook.result()) - injector_inst.verify.assert_called_once_with() - - @mock.patch("rally.common.objects.Deployment.get") - @mock.patch("rally.common.utils.Timer", side_effect=fakes.FakeTimer) - def test_run_extra_config(self, mock_timer, mock_deployment_get): - mock_deployment_get.return_value = { - "config": {"type": "ExistingCloud", - "extra": {"cloud_config": {"conf": "foo_config"}}}} - hook = fault_injection.FaultInjectionHook( - self.task, {"action": "foo"}, {"iteration": 1}) - - with mock.patch.object(os_faults, "human_api") as mock_human_api: - with mock.patch.object(os_faults, "connect") as mock_connect: - hook.run_sync() - - injector_inst = mock_connect.return_value - - mock_connect.assert_called_once_with({"conf": "foo_config"}) - mock_human_api.assert_called_once_with(injector_inst, "foo") - - self.assertEqual( - {"finished_at": fakes.FakeTimer().finish_timestamp(), - "started_at": fakes.FakeTimer().timestamp(), - "status": consts.HookStatus.SUCCESS, - "triggered_by": {"iteration": 1}}, - hook.result()) - - @mock.patch("rally.common.objects.Deployment.get") - @mock.patch("os_faults.human_api") - @mock.patch("os_faults.connect") - @mock.patch("rally.common.utils.Timer", side_effect=fakes.FakeTimer) - def test_run_error(self, mock_timer, mock_connect, mock_human_api, - mock_deployment_get): - injector_inst = mock_connect.return_value - mock_human_api.side_effect = error.OSFException("foo error") - hook = fault_injection.FaultInjectionHook( - self.task, {"action": "foo", "verify": True}, - {"iteration": 1}) - - hook.run_sync() - - self.assertEqual( - {"finished_at": fakes.FakeTimer().finish_timestamp(), - "started_at": fakes.FakeTimer().timestamp(), - "status": consts.HookStatus.FAILED, - "error": { - "details": mock.ANY, - "etype": "OSFException", - "msg": "foo error"}, - "triggered_by": {"iteration": 1}}, - hook.result()) - - mock_connect.assert_called_once_with(None) - 
injector_inst.verify.assert_called_once_with() - mock_human_api.assert_called_once_with(injector_inst, "foo") diff --git a/tests/unit/plugins/openstack/platforms/__init__.py b/tests/unit/plugins/openstack/platforms/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/platforms/test_existing.py b/tests/unit/plugins/openstack/platforms/test_existing.py deleted file mode 100644 index 8387574e34..0000000000 --- a/tests/unit/plugins/openstack/platforms/test_existing.py +++ /dev/null @@ -1,288 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json - -import mock - -from rally.env import platform -from rally.plugins.openstack.platforms import existing -from tests.unit.env import test_platform - - -class ExistingPlatformTestCase(test_platform.PlatformBaseTestCase): - - def test_validate_spec_schema(self): - spec = { - "existing@openstack": { - "auth_url": "url", - "admin": { - "username": "admin", - "password": "password123", - "tenant_name": "admin" - }, - "users": [{ - "username": "admin", - "password": "password123", - "tenant_name": "admin" - }] - } - } - result = platform.Platform.validate("existing@openstack", {}, - spec, spec["existing@openstack"]) - self.assertEqual([], result) - - def test_validate_invalid_spec(self): - spec = { - "existing@openstack": { - "something_wrong": { - "username": "not_an_admin", - "password": "password123", - "project_name": "not_an_admin" - } - } - } - result = platform.Platform.validate("existing@openstack", {}, - spec, spec["existing@openstack"]) - self.assertNotEqual([], result) - - def test_create_users_only(self): - - spec = { - "auth_url": "https://best", - "endpoint": "check_that_its_poped", - "users": [ - {"project_name": "a", "username": "a", "password": "a"}, - {"project_name": "b", "username": "b", "password": "b"} - ] - } - - self.assertEqual( - ({ - "admin": None, - "users": [ - { - "auth_url": "https://best", "endpoint_type": None, - "region_name": None, - "domain_name": None, - "user_domain_name": "default", - "project_domain_name": "default", - "https_insecure": False, "https_cacert": None, - "tenant_name": "a", "username": "a", "password": "a" - }, - { - "auth_url": "https://best", "endpoint_type": None, - "region_name": None, - "domain_name": None, - "user_domain_name": "default", - "project_domain_name": "default", - "https_insecure": False, "https_cacert": None, - "tenant_name": "b", "username": "b", "password": "b" - } - ] - }, {}), - existing.OpenStack(spec).create()) - - def test_create_admin_only(self): - spec = { - "auth_url": "https://best", - "endpoint_type": "public", - "https_insecure": True, - "https_cacert": "/my.ca", - "profiler_hmac_key": "key", - "profiler_conn_str": "http://prof", - "admin": { - "domain_name": "d", "user_domain_name": "d", - "project_domain_name": "d", "project_name": "d", - "username": "d", "password": "d" - } - } - self.assertEqual( - ( - { - "admin": { - "auth_url": "https://best", - "endpoint_type": "public", - 
"https_insecure": True, "https_cacert": "/my.ca", - "profiler_hmac_key": "key", - "profiler_conn_str": "http://prof", - "region_name": None, "domain_name": "d", - "user_domain_name": "d", "project_domain_name": "d", - "tenant_name": "d", "username": "d", "password": "d" - }, - "users": [] - }, - {} - ), - existing.OpenStack(spec).create()) - - def test_create_spec_from_sys_environ(self): - # keystone v2 - sys_env = { - "OS_AUTH_URL": "https://example.com", - "OS_USERNAME": "user", - "OS_PASSWORD": "pass", - "OS_TENANT_NAME": "projectX", - "OS_INTERFACE": "publicURL", - "OS_REGION_NAME": "Region1", - "OS_CACERT": "Cacert", - "OS_INSECURE": True, - "OSPROFILER_HMAC_KEY": "key", - "OSPROFILER_CONN_STR": "https://example2.com", - } - - result = existing.OpenStack.create_spec_from_sys_environ(sys_env) - self.assertTrue(result["available"]) - self.assertEqual( - { - "admin": { - "username": "user", - "tenant_name": "projectX", - "password": "pass" - }, - "auth_url": "https://example.com", - "endpoint_type": "public", - "region_name": "Region1", - "https_cacert": "Cacert", - "https_insecure": True, - "profiler_hmac_key": "key", - "profiler_conn_str": "https://example2.com" - }, result["spec"]) - - # keystone v3 - sys_env["OS_IDENTITY_API_VERSION"] = "3" - - result = existing.OpenStack.create_spec_from_sys_environ(sys_env) - print(json.dumps(result["spec"], indent=4)) - self.assertEqual( - { - "admin": { - "username": "user", - "project_name": "projectX", - "user_domain_name": "Default", - "password": "pass", - "project_domain_name": "Default" - }, - "endpoint_type": "public", - "auth_url": "https://example.com", - "region_name": "Region1", - "https_cacert": "Cacert", - "https_insecure": True, - "profiler_hmac_key": "key", - "profiler_conn_str": "https://example2.com" - }, result["spec"]) - - def test_create_spec_from_sys_environ_fails_with_missing_vars(self): - sys_env = {"OS_AUTH_URL": "https://example.com"} - result = existing.OpenStack.create_spec_from_sys_environ(sys_env) - self.assertFalse(result["available"]) - self.assertIn("OS_USERNAME", result["message"]) - self.assertIn("OS_PASSWORD", result["message"]) - self.assertNotIn("OS_AUTH_URL", result["message"]) - - sys_env = {"OS_AUTH_URL": "https://example.com", - "OS_USERNAME": "user", - "OS_PASSWORD": "pass"} - result = existing.OpenStack.create_spec_from_sys_environ(sys_env) - self.assertFalse(result["available"]) - self.assertIn("OS_PROJECT_NAME or OS_TENANT_NAME", result["message"]) - - def test_destroy(self): - self.assertIsNone(existing.OpenStack({}).destroy()) - - def test_cleanup(self): - result1 = existing.OpenStack({}).cleanup() - result2 = existing.OpenStack({}).cleanup(task_uuid="any") - self.assertEqual(result1, result2) - self.assertEqual( - { - "message": "Coming soon!", - "discovered": 0, - "deleted": 0, - "failed": 0, - "resources": {}, - "errors": [] - }, - result1 - ) - self._check_cleanup_schema(result1) - - @mock.patch("rally.plugins.openstack.osclients.Clients") - def test_check_health(self, mock_clients): - pdata = { - "admin": mock.MagicMock(), - "users": [mock.MagicMock(), mock.MagicMock()] - } - result = existing.OpenStack({}, platform_data=pdata).check_health() - self._check_health_schema(result) - self.assertEqual({"available": True}, result) - mock_clients.assert_has_calls( - [mock.call(pdata["admin"]), mock.call().verified_keystone(), - mock.call(pdata["users"][0]), mock.call().keystone(), - mock.call(pdata["users"][1]), mock.call().keystone()]) - - @mock.patch("rally.plugins.openstack.osclients.Clients") - def 
test_check_failed_admin(self, mock_clients): - mock_clients.return_value.verified_keystone.side_effect = Exception - pdata = {"admin": {"username": "balbab", "password": "12345"}} - result = existing.OpenStack({}, platform_data=pdata).check_health() - self._check_health_schema(result) - self.assertEqual( - {"available": False, - "message": - "Bad admin creds: \n%s" - % json.dumps({"username": "balbab", "password": "***"}, - indent=2, sort_keys=True), - "traceback": mock.ANY}, - result) - self.assertIn("Traceback (most recent call last)", result["traceback"]) - - @mock.patch("rally.plugins.openstack.osclients.Clients") - def test_check_failed_users(self, mock_clients): - mock_clients.return_value.keystone.side_effect = Exception - pdata = {"admin": None, - "users": [{"username": "balbab", "password": "12345"}]} - result = existing.OpenStack({}, platform_data=pdata).check_health() - self._check_health_schema(result) - self.assertEqual( - {"available": False, - "message": - "Bad user creds: \n%s" - % json.dumps({"username": "balbab", "password": "***"}, - indent=2, sort_keys=True), - "traceback": mock.ANY}, - result) - self.assertIn("Traceback (most recent call last)", result["traceback"]) - - @mock.patch("rally.plugins.openstack.osclients.Clients") - def test_info(self, mock_clients): - mock_clients.return_value.services.return_value = { - "foo": "bar", - "volumev4": "__unknown__"} - platform_data = { - "admin": None, - "users": [{"username": "u1", "password": "123"}] - } - p = existing.OpenStack({}, platform_data=platform_data) - - result = p.info() - mock_clients.assert_called_once_with(platform_data["users"][0]) - mock_clients.return_value.services.assert_called_once_with() - self.assertEqual( - { - "info": { - "services": [{"type": "foo", "name": "bar"}, - {"type": "volumev4"}]}}, - result) - self._check_info_schema(result) diff --git a/tests/unit/plugins/openstack/scenarios/__init__.py b/tests/unit/plugins/openstack/scenarios/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/scenarios/authenticate/__init__.py b/tests/unit/plugins/openstack/scenarios/authenticate/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/scenarios/authenticate/test_authenticate.py b/tests/unit/plugins/openstack/scenarios/authenticate/test_authenticate.py deleted file mode 100644 index ec0646072c..0000000000 --- a/tests/unit/plugins/openstack/scenarios/authenticate/test_authenticate.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from rally.plugins.openstack.scenarios.authenticate import authenticate -from tests.unit import test - -import mock - - -class AuthenticateTestCase(test.ScenarioTestCase): - - def test_keystone(self): - scenario_inst = authenticate.Keystone() - scenario_inst.run() - self.assertTrue(self.client_created("keystone")) - self._test_atomic_action_timer(scenario_inst.atomic_actions(), - "authenticate.keystone") - - def test_validate_glance(self): - scenario_inst = authenticate.ValidateGlance() - scenario_inst.run(5) - - # NOTE(stpierre): We can't use assert_has_calls() here because - # that includes calls on the return values of the mock object - # as well. Glance (and Heat and Monasca, tested below) returns - # an iterator that the scenario wraps in list() in order to - # force glanceclient to actually make the API call, and this - # results in a bunch of call().__iter__() and call().__len__() - # calls that aren't matched if we use assert_has_calls(). - self.assertItemsEqual( - self.clients("glance").images.list.call_args_list, - [mock.call(name=mock.ANY)] * 5) - self._test_atomic_action_timer(scenario_inst.atomic_actions(), - "authenticate.validate_glance") - - def test_validate_nova(self): - scenario_inst = authenticate.ValidateNova() - scenario_inst.run(5) - self.clients("nova").flavors.list.assert_has_calls([mock.call()] * 5) - self._test_atomic_action_timer(scenario_inst.atomic_actions(), - "authenticate.validate_nova") - - def test_validate_ceilometer(self): - scenario_inst = authenticate.ValidateCeilometer() - scenario_inst.run(5) - self.clients("ceilometer").meters.list.assert_has_calls( - [mock.call()] * 5) - self._test_atomic_action_timer( - scenario_inst.atomic_actions(), - "authenticate.validate_ceilometer") - - def test_validate_cinder(self): - scenario_inst = authenticate.ValidateCinder() - scenario_inst.run(5) - self.clients("cinder").volume_types.list.assert_has_calls( - [mock.call()] * 5) - self._test_atomic_action_timer(scenario_inst.atomic_actions(), - "authenticate.validate_cinder") - - def test_validate_neutron(self): - scenario_inst = authenticate.ValidateNeutron() - scenario_inst.run(5) - self.clients("neutron").list_networks.assert_has_calls( - [mock.call()] * 5) - self._test_atomic_action_timer(scenario_inst.atomic_actions(), - "authenticate.validate_neutron") - - def test_validate_heat(self): - scenario_inst = authenticate.ValidateHeat() - scenario_inst.run(5) - self.assertItemsEqual( - self.clients("heat").stacks.list.call_args_list, - [mock.call(limit=0)] * 5) - self._test_atomic_action_timer(scenario_inst.atomic_actions(), - "authenticate.validate_heat") - - def test_validate_monasca(self): - scenario_inst = authenticate.ValidateMonasca() - scenario_inst.run(5) - self.assertItemsEqual( - self.clients("monasca").metrics.list.call_args_list, - [mock.call(limit=0)] * 5) - self._test_atomic_action_timer(scenario_inst.atomic_actions(), - "authenticate.validate_monasca") diff --git a/tests/unit/plugins/openstack/scenarios/ceilometer/__init__.py b/tests/unit/plugins/openstack/scenarios/ceilometer/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/scenarios/ceilometer/test_alarms.py b/tests/unit/plugins/openstack/scenarios/ceilometer/test_alarms.py deleted file mode 100644 index 81d8c3aa1e..0000000000 --- a/tests/unit/plugins/openstack/scenarios/ceilometer/test_alarms.py +++ /dev/null @@ -1,102 +0,0 @@ -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.plugins.openstack.scenarios.ceilometer import alarms -from tests.unit import test - - -class CeilometerAlarmsTestCase(test.ScenarioTestCase): - def test_create_alarm(self): - scenario = alarms.CreateAlarm(self.context) - - scenario._create_alarm = mock.MagicMock() - scenario.run("fake_meter_name", "fake_threshold", fakearg="f") - scenario._create_alarm.assert_called_once_with("fake_meter_name", - "fake_threshold", - {"fakearg": "f"}) - - def test_list_alarm(self): - scenario = alarms.ListAlarms(self.context) - - scenario._list_alarms = mock.MagicMock() - scenario.run() - scenario._list_alarms.assert_called_once_with() - - def test_create_and_list_alarm(self): - fake_alarm = mock.MagicMock() - scenario = alarms.CreateAndListAlarm(self.context) - - scenario._create_alarm = mock.MagicMock(return_value=fake_alarm) - scenario._list_alarms = mock.MagicMock() - scenario.run("fake_meter_name", "fake_threshold", fakearg="f") - scenario._create_alarm.assert_called_once_with("fake_meter_name", - "fake_threshold", - {"fakearg": "f"}) - scenario._list_alarms.assert_called_once_with(fake_alarm.alarm_id) - - def test_create_and_get_alarm(self): - fake_alarm = mock.MagicMock() - scenario = alarms.CreateAndGetAlarm(self.context) - - scenario._create_alarm = mock.MagicMock(return_value=fake_alarm) - scenario._get_alarm = mock.MagicMock() - scenario.run("fake_meter_name", "fake_threshold", fakearg="f") - scenario._create_alarm.assert_called_once_with("fake_meter_name", - "fake_threshold", - {"fakearg": "f"}) - scenario._get_alarm.assert_called_once_with(fake_alarm.alarm_id) - - def test_create_and_update_alarm(self): - fake_alram_dict_diff = {"description": "Changed Test Description"} - fake_alarm = mock.MagicMock() - scenario = alarms.CreateAndUpdateAlarm(self.context) - - scenario._create_alarm = mock.MagicMock(return_value=fake_alarm) - scenario._update_alarm = mock.MagicMock() - scenario.run("fake_meter_name", "fake_threshold", fakearg="f") - scenario._create_alarm.assert_called_once_with("fake_meter_name", - "fake_threshold", - {"fakearg": "f"}) - scenario._update_alarm.assert_called_once_with(fake_alarm.alarm_id, - fake_alram_dict_diff) - - def test_create_and_delete_alarm(self): - fake_alarm = mock.MagicMock() - scenario = alarms.CreateAndDeleteAlarm(self.context) - - scenario._create_alarm = mock.MagicMock(return_value=fake_alarm) - scenario._delete_alarm = mock.MagicMock() - scenario.run("fake_meter_name", "fake_threshold", fakearg="f") - scenario._create_alarm.assert_called_once_with("fake_meter_name", - "fake_threshold", - {"fakearg": "f"}) - scenario._delete_alarm.assert_called_once_with(fake_alarm.alarm_id) - - def test_create_and_get_alarm_history(self): - alarm = mock.Mock(alarm_id="foo_id") - scenario = alarms.CreateAlarmAndGetHistory( - self.context) - - scenario._create_alarm = mock.MagicMock(return_value=alarm) - scenario._get_alarm_state = mock.MagicMock() - scenario._get_alarm_history = mock.MagicMock() - 
scenario._set_alarm_state = mock.MagicMock() - scenario.run("meter_name", "threshold", "state", 60, fakearg="f") - scenario._create_alarm.assert_called_once_with( - "meter_name", "threshold", {"fakearg": "f"}) - scenario._get_alarm_state.assert_called_once_with("foo_id") - scenario._get_alarm_history.assert_called_once_with("foo_id") - scenario._set_alarm_state.assert_called_once_with(alarm, "state", 60) diff --git a/tests/unit/plugins/openstack/scenarios/ceilometer/test_events.py b/tests/unit/plugins/openstack/scenarios/ceilometer/test_events.py deleted file mode 100644 index 8ff74d4bdf..0000000000 --- a/tests/unit/plugins/openstack/scenarios/ceilometer/test_events.py +++ /dev/null @@ -1,103 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally import exceptions -from rally.plugins.openstack.scenarios.ceilometer import events -from tests.unit import test - - -class CeilometerEventsTestCase(test.ScenarioTestCase): - - def setUp(self): - super(CeilometerEventsTestCase, self).setUp() - patch = mock.patch( - "rally.plugins.openstack.services.identity.identity.Identity") - self.addCleanup(patch.stop) - self.mock_identity = patch.start() - - def get_test_context(self): - context = super(CeilometerEventsTestCase, self).get_test_context() - context["admin"] = {"id": "fake_user_id", - "credential": mock.MagicMock() - } - return context - - def test_list_events(self): - scenario = events.CeilometerEventsCreateUserAndListEvents(self.context) - - scenario._list_events = mock.MagicMock() - - scenario.run() - - self.mock_identity.return_value.create_user.assert_called_once_with() - scenario._list_events.assert_called_once_with() - - def test_list_events_fails(self): - scenario = events.CeilometerEventsCreateUserAndListEvents(self.context) - - scenario._list_events = mock.MagicMock(return_value=[]) - - self.assertRaises(exceptions.RallyException, scenario.run) - - self.mock_identity.return_value.create_user.assert_called_once_with() - scenario._list_events.assert_called_once_with() - - def test_list_event_types(self): - scenario = events.CeilometerEventsCreateUserAndListEventTypes( - self.context) - - scenario._list_event_types = mock.MagicMock() - - scenario.run() - - self.mock_identity.return_value.create_user.assert_called_once_with() - scenario._list_event_types.assert_called_once_with() - - def test_list_event_types_fails(self): - scenario = events.CeilometerEventsCreateUserAndListEventTypes( - self.context) - - scenario._list_event_types = mock.MagicMock(return_value=[]) - - self.assertRaises(exceptions.RallyException, scenario.run) - - self.mock_identity.return_value.create_user.assert_called_once_with() - scenario._list_event_types.assert_called_once_with() - - def test_get_event(self): - scenario = events.CeilometerEventsCreateUserAndGetEvent(self.context) - - scenario._get_event = mock.MagicMock() - scenario._list_events = mock.MagicMock( - return_value=[mock.Mock(message_id="fake_id")]) - - scenario.run() - - 
self.mock_identity.return_value.create_user.assert_called_once_with() - scenario._list_events.assert_called_with() - scenario._get_event.assert_called_with(event_id="fake_id") - - def test_get_event_fails(self): - scenario = events.CeilometerEventsCreateUserAndGetEvent(self.context) - - scenario._list_events = mock.MagicMock(return_value=[]) - scenario._get_event = mock.MagicMock() - - self.assertRaises(exceptions.RallyException, scenario.run) - - self.mock_identity.return_value.create_user.assert_called_once_with() - scenario._list_events.assert_called_with() - self.assertFalse(scenario._get_event.called) diff --git a/tests/unit/plugins/openstack/scenarios/ceilometer/test_meters.py b/tests/unit/plugins/openstack/scenarios/ceilometer/test_meters.py deleted file mode 100644 index a5634e4512..0000000000 --- a/tests/unit/plugins/openstack/scenarios/ceilometer/test_meters.py +++ /dev/null @@ -1,75 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.plugins.openstack.scenarios.ceilometer import meters -from tests.unit import test - - -BASE = "rally.plugins.openstack.scenarios.ceilometer" - - -class CeilometerMetersTestCase(test.ScenarioTestCase): - @mock.patch("%s.meters.ListMatchedMeters.run" % BASE) - def test_all_meter_list_queries( - self, mock_list_matched_meters_run): - scenario = meters.ListMeters(self.context) - metadata_query = {"a": "test"} - limit = 100 - - scenario.run(metadata_query, limit) - - mock_list_matched_meters_run.assert_any_call(limit=100) - mock_list_matched_meters_run.assert_any_call( - metadata_query=metadata_query) - mock_list_matched_meters_run.assert_any_call(filter_by_user_id=True) - mock_list_matched_meters_run.assert_any_call(filter_by_project_id=True) - mock_list_matched_meters_run.assert_any_call( - filter_by_resource_id=True) - - @mock.patch("%s.meters.ListMatchedMeters.run" % BASE) - def test_meter_list_queries_without_limit_and_metadata( - self, mock_list_matched_meters_run): - - scenario = meters.ListMeters(self.context) - scenario.run() - expected_call_args_list = [ - mock.call(filter_by_project_id=True), - mock.call(filter_by_user_id=True), - mock.call(filter_by_resource_id=True) - ] - self.assertSequenceEqual( - expected_call_args_list, - mock_list_matched_meters_run.call_args_list) - - @mock.patch("%s.meters.ListMatchedMeters._list_meters" % BASE) - def test_list_matched_meters( - self, mock_list_matched_meters__list_meters): - mock_func = mock_list_matched_meters__list_meters - scenario = meters.ListMatchedMeters(self.context) - context = {"user": {"tenant_id": "fake", "id": "fake_id"}, - "tenant": {"id": "fake_id", - "resources": ["fake_resource"]}} - scenario.context = context - - metadata_query = {"a": "test"} - limit = 100 - scenario.run(True, True, True, metadata_query, limit) - mock_func.assert_called_once_with( - [{"field": "user_id", "value": "fake_id", "op": "eq"}, - {"field": "project_id", "value": "fake_id", "op": "eq"}, - {"field": "resource_id", "value": "fake_resource", "op": "eq"}, - 
{"field": "metadata.a", "value": "test", "op": "eq"}], - 100) diff --git a/tests/unit/plugins/openstack/scenarios/ceilometer/test_queries.py b/tests/unit/plugins/openstack/scenarios/ceilometer/test_queries.py deleted file mode 100644 index 825fa8c77d..0000000000 --- a/tests/unit/plugins/openstack/scenarios/ceilometer/test_queries.py +++ /dev/null @@ -1,103 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json - -import mock - -from rally.plugins.openstack.scenarios.ceilometer import queries -from tests.unit import test - - -class CeilometerQueriesTestCase(test.ScenarioTestCase): - def test_create_and_query_alarms(self): - scenario = queries.CeilometerQueriesCreateAndQueryAlarms(self.context) - - scenario._create_alarm = mock.MagicMock() - scenario._query_alarms = mock.MagicMock() - - scenario.run("fake_meter_name", 100, "fake_filter", - "fake_orderby_attribute", 10, fakearg="f") - scenario._create_alarm.assert_called_once_with("fake_meter_name", - 100, {"fakearg": "f"}) - scenario._query_alarms.assert_called_once_with( - json.dumps("fake_filter"), "fake_orderby_attribute", 10) - - def test_create_and_query_alarms_no_filter(self): - scenario = queries.CeilometerQueriesCreateAndQueryAlarms(self.context) - - scenario._create_alarm = mock.MagicMock() - scenario._query_alarms = mock.MagicMock() - - scenario.run("fake_meter_name", 100, None, "fake_orderby_attribute", - 10, fakearg="f") - scenario._create_alarm.assert_called_once_with("fake_meter_name", - 100, {"fakearg": "f"}) - scenario._query_alarms.assert_called_once_with( - None, "fake_orderby_attribute", 10) - - def test_create_and_query_alarm_history(self): - fake_alarm = mock.MagicMock() - fake_alarm.alarm_id = "fake_alarm_id" - scenario = queries.CeilometerQueriesCreateAndQueryAlarmHistory( - self.context) - - scenario._create_alarm = mock.MagicMock(return_value=fake_alarm) - scenario._query_alarm_history = mock.MagicMock() - - fake_filter = json.dumps({"=": {"alarm_id": fake_alarm.alarm_id}}) - scenario.run("fake_meter_name", 100, "fake_orderby_attribute", - 10, fakearg="f") - scenario._create_alarm.assert_called_once_with("fake_meter_name", 100, - {"fakearg": "f"}) - scenario._query_alarm_history.assert_called_once_with( - fake_filter, "fake_orderby_attribute", 10) - - def test_create_and_query_samples(self): - scenario = queries.CeilometerQueriesCreateAndQuerySamples(self.context) - - scenario._create_sample = mock.MagicMock() - scenario._query_samples = mock.MagicMock() - - scenario.run("fake_counter_name", "fake_counter_type", - "fake_counter_unit", "fake_counter_volume", - "fake_resource_id", "fake_filter", - "fake_orderby_attribute", 10, fakearg="f") - scenario._create_sample.assert_called_once_with("fake_counter_name", - "fake_counter_type", - "fake_counter_unit", - "fake_counter_volume", - "fake_resource_id", - fakearg="f") - scenario._query_samples.assert_called_once_with( - json.dumps("fake_filter"), "fake_orderby_attribute", 10) - - def test_create_and_query_samples_no_filter(self): - 
scenario = queries.CeilometerQueriesCreateAndQuerySamples(self.context) - - scenario._create_sample = mock.MagicMock() - scenario._query_samples = mock.MagicMock() - - scenario.run("fake_counter_name", "fake_counter_type", - "fake_counter_unit", "fake_counter_volume", - "fake_resource_id", None, - "fake_orderby_attribute", 10, fakearg="f") - scenario._create_sample.assert_called_once_with("fake_counter_name", - "fake_counter_type", - "fake_counter_unit", - "fake_counter_volume", - "fake_resource_id", - fakearg="f") - scenario._query_samples.assert_called_once_with( - None, "fake_orderby_attribute", 10) diff --git a/tests/unit/plugins/openstack/scenarios/ceilometer/test_resources.py b/tests/unit/plugins/openstack/scenarios/ceilometer/test_resources.py deleted file mode 100644 index 506f4cac50..0000000000 --- a/tests/unit/plugins/openstack/scenarios/ceilometer/test_resources.py +++ /dev/null @@ -1,107 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally import exceptions -from rally.plugins.openstack.scenarios.ceilometer import resources -from tests.unit import test - - -BASE = "rally.plugins.openstack.scenarios.ceilometer" - - -class CeilometerResourcesTestCase(test.ScenarioTestCase): - @mock.patch("%s.resources.ListMatchedResources.run" % BASE) - def test_all_resource_list_queries( - self, mock_list_matched_resources_run): - metadata_query = {"a": "test"} - start_time = "fake start time" - end_time = "fake end time" - limit = 100 - - scenario = resources.ListResources(self.context) - scenario.run(metadata_query, start_time, end_time, limit) - mock_list_matched_resources_run.assert_any_call(limit=100) - mock_list_matched_resources_run.assert_any_call(start_time=start_time, - end_time=end_time) - mock_list_matched_resources_run.assert_any_call(end_time=end_time) - mock_list_matched_resources_run.assert_any_call(start_time=start_time) - mock_list_matched_resources_run.assert_any_call( - metadata_query=metadata_query) - mock_list_matched_resources_run.assert_any_call( - filter_by_user_id=True) - mock_list_matched_resources_run.assert_any_call( - filter_by_project_id=True) - mock_list_matched_resources_run.assert_any_call( - filter_by_resource_id=True) - - def test_list_matched_resources(self): - scenario = resources.ListMatchedResources(self.context) - scenario._list_resources = mock.MagicMock() - context = {"user": {"tenant_id": "fake", "id": "fake_id"}, - "tenant": {"id": "fake_id", - "resources": ["fake_resource"]}} - scenario.context = context - - metadata_query = {"a": "test"} - start_time = "2015-09-09T00:00:00" - end_time = "2015-09-10T00:00:00" - limit = 100 - scenario.run(True, True, True, metadata_query, - start_time, end_time, limit) - scenario._list_resources.assert_called_once_with( - [{"field": "user_id", "value": "fake_id", "op": "eq"}, - {"field": "project_id", "value": "fake_id", "op": "eq"}, - {"field": "resource_id", "value": "fake_resource", "op": "eq"}, - {"field": "metadata.a", "value": "test", "op": "eq"}, - {"field": 
"timestamp", "value": "2015-09-09T00:00:00", - "op": ">="}, - {"field": "timestamp", "value": "2015-09-10T00:00:00", - "op": "<="} - ], - 100) - - def test_get_tenant_resources(self): - scenario = resources.GetTenantResources(self.context) - resource_list = ["id1", "id2", "id3", "id4"] - context = {"user": {"tenant_id": "fake"}, - "tenant": {"id": "fake", "resources": resource_list}} - scenario.context = context - scenario._get_resource = mock.MagicMock() - scenario.run() - for resource_id in resource_list: - scenario._get_resource.assert_any_call(resource_id) - - @mock.patch("%s.resources.ListMatchedResources.run" % BASE) - def test_resource_list_queries_without_limit_and_metadata( - self, mock_list_matched_resources_run): - scenario = resources.ListResources() - scenario.run() - expected_call_args_list = [ - mock.call(filter_by_project_id=True), - mock.call(filter_by_user_id=True), - mock.call(filter_by_resource_id=True) - ] - self.assertSequenceEqual( - expected_call_args_list, - mock_list_matched_resources_run.call_args_list) - - def test_get_tenant_resources_with_exception(self): - scenario = resources.GetTenantResources(self.context) - resource_list = [] - context = {"user": {"tenant_id": "fake"}, - "tenant": {"id": "fake", "resources": resource_list}} - scenario.context = context - self.assertRaises(exceptions.RallyAssertionError, scenario.run) diff --git a/tests/unit/plugins/openstack/scenarios/ceilometer/test_samples.py b/tests/unit/plugins/openstack/scenarios/ceilometer/test_samples.py deleted file mode 100644 index 366a88cc15..0000000000 --- a/tests/unit/plugins/openstack/scenarios/ceilometer/test_samples.py +++ /dev/null @@ -1,72 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from rally.plugins.openstack.scenarios.ceilometer import samples -from tests.unit import test - - -BASE = "rally.plugins.openstack.scenarios.ceilometer" - - -class CeilometerSamplesTestCase(test.ScenarioTestCase): - - @mock.patch("%s.samples.ListMatchedSamples.run" % BASE) - def test_all_list_samples(self, mock_list_matched_samples_run): - metadata_query = {"a": "test"} - limit = 10 - scenario = samples.ListSamples(self.context) - scenario.run(metadata_query, limit) - mock_list_matched_samples_run.assert_any_call(limit=10) - mock_list_matched_samples_run.assert_any_call( - metadata_query=metadata_query) - mock_list_matched_samples_run.assert_any_call( - filter_by_resource_id=True) - mock_list_matched_samples_run.assert_any_call( - filter_by_user_id=True) - mock_list_matched_samples_run.assert_any_call( - filter_by_project_id=True) - - @mock.patch("%s.samples.ListMatchedSamples.run" % BASE) - def test_list_samples_without_limit_and_metadata( - self, - mock_list_matched_samples_run): - scenario = samples.ListSamples() - scenario.run() - expected_call_args_list = [ - mock.call(filter_by_project_id=True), - mock.call(filter_by_user_id=True), - mock.call(filter_by_resource_id=True) - ] - self.assertSequenceEqual( - expected_call_args_list, - mock_list_matched_samples_run.call_args_list) - - def test_list_matched_samples(self): - scenario = samples.ListMatchedSamples() - scenario._list_samples = mock.MagicMock() - context = {"user": {"tenant_id": "fake", "id": "fake_id"}, - "tenant": {"id": "fake_id", - "resources": ["fake_resource"]}} - scenario.context = context - metadata_query = {"a": "test"} - limit = 10 - scenario.run(True, True, True, metadata_query, limit) - scenario._list_samples.assert_called_once_with( - [{"field": "user_id", "value": "fake_id", "op": "eq"}, - {"field": "project_id", "value": "fake_id", "op": "eq"}, - {"field": "resource_id", "value": "fake_resource", "op": "eq"}, - {"field": "metadata.a", "value": "test", "op": "eq"}], - 10) diff --git a/tests/unit/plugins/openstack/scenarios/ceilometer/test_stats.py b/tests/unit/plugins/openstack/scenarios/ceilometer/test_stats.py deleted file mode 100644 index b5f0a97406..0000000000 --- a/tests/unit/plugins/openstack/scenarios/ceilometer/test_stats.py +++ /dev/null @@ -1,45 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from rally.plugins.openstack.scenarios.ceilometer import stats -from tests.unit import test - - -class CeilometerStatsTestCase(test.ScenarioTestCase): - - def test_get_stats(self): - scenario = stats.GetStats(self.context) - scenario._get_stats = mock.MagicMock() - context = {"user": {"tenant_id": "fake", "id": "fake_id"}, - "tenant": {"id": "fake_id", - "resources": ["fake_resource"]}} - metadata_query = {"a": "test"} - period = 10 - groupby = "user_id" - aggregates = "sum" - scenario.context = context - scenario.run("fake_meter", True, True, True, metadata_query, - period, groupby, aggregates) - scenario._get_stats.assert_called_once_with( - "fake_meter", - [{"field": "user_id", "value": "fake_id", "op": "eq"}, - {"field": "project_id", "value": "fake_id", "op": "eq"}, - {"field": "resource_id", "value": "fake_resource", "op": "eq"}, - {"field": "metadata.a", "value": "test", "op": "eq"}], - 10, - "user_id", - "sum" - ) diff --git a/tests/unit/plugins/openstack/scenarios/ceilometer/test_traits.py b/tests/unit/plugins/openstack/scenarios/ceilometer/test_traits.py deleted file mode 100644 index 35344da67e..0000000000 --- a/tests/unit/plugins/openstack/scenarios/ceilometer/test_traits.py +++ /dev/null @@ -1,69 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from rally.plugins.openstack.scenarios.ceilometer import traits -from tests.unit import test - - -class CeilometerTraitsTestCase(test.ScenarioTestCase): - - def setUp(self): - super(CeilometerTraitsTestCase, self).setUp() - patch = mock.patch( - "rally.plugins.openstack.services.identity.identity.Identity") - self.addCleanup(patch.stop) - self.mock_identity = patch.start() - - def get_test_context(self): - context = super(CeilometerTraitsTestCase, self).get_test_context() - context["admin"] = {"id": "fake_user_id", - "credential": mock.MagicMock() - } - return context - - def test_list_traits(self): - scenario = traits.CreateUserAndListTraits(self.context) - - scenario._list_event_traits = mock.MagicMock() - scenario._list_events = mock.MagicMock( - return_value=[mock.Mock( - event_type="fake_event_type", - traits=[{"name": "fake_trait_name"}]) - ]) - - scenario.run() - - self.mock_identity.return_value.create_user.assert_called_once_with() - scenario._list_events.assert_called_with() - scenario._list_event_traits.assert_called_once_with( - event_type="fake_event_type", trait_name="fake_trait_name") - - def test_list_trait_descriptions(self): - scenario = traits.CreateUserAndListTraitDescriptions( - self.context) - - scenario._list_event_trait_descriptions = mock.MagicMock() - scenario._list_events = mock.MagicMock( - return_value=[mock.Mock( - event_type="fake_event_type") - ]) - - scenario.run() - - self.mock_identity.return_value.create_user.assert_called_once_with() - scenario._list_events.assert_called_with() - scenario._list_event_trait_descriptions.assert_called_once_with( - event_type="fake_event_type") diff --git a/tests/unit/plugins/openstack/scenarios/ceilometer/test_utils.py b/tests/unit/plugins/openstack/scenarios/ceilometer/test_utils.py deleted file mode 100644 index bf1e76fec6..0000000000 --- a/tests/unit/plugins/openstack/scenarios/ceilometer/test_utils.py +++ /dev/null @@ -1,398 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import copy -import datetime as dt - -from dateutil import parser -import mock - -from rally import exceptions -from rally.plugins.openstack.scenarios.ceilometer import utils -from tests.unit import test - -CEILOMETER_UTILS = "rally.plugins.openstack.scenarios.ceilometer.utils" - - -class CeilometerScenarioTestCase(test.ScenarioTestCase): - def setUp(self): - super(CeilometerScenarioTestCase, self).setUp() - self.scenario = utils.CeilometerScenario(self.context) - - @mock.patch("%s.uuid.uuid4" % CEILOMETER_UTILS) - def test__make_samples_no_batch_size(self, mock_uuid4): - mock_uuid4.return_value = "fake_uuid" - test_timestamp = dt.datetime(2015, 10, 20, 14, 18, 40) - result = list(self.scenario._make_samples(count=2, interval=60, - timestamp=test_timestamp)) - self.assertEqual(1, len(result)) - expected = {"counter_name": "cpu_util", - "counter_type": "gauge", - "counter_unit": "%", - "counter_volume": 1, - "resource_id": "fake_uuid", - "timestamp": test_timestamp.isoformat()} - self.assertEqual(expected, result[0][0]) - samples_int = (parser.parse(result[0][0]["timestamp"]) - - parser.parse(result[0][1]["timestamp"])).seconds - self.assertEqual(60, samples_int) - - @mock.patch("%s.uuid.uuid4" % CEILOMETER_UTILS) - def test__make_samples_batch_size(self, mock_uuid4): - mock_uuid4.return_value = "fake_uuid" - test_timestamp = dt.datetime(2015, 10, 20, 14, 18, 40) - result = list(self.scenario._make_samples(count=4, interval=60, - batch_size=2, - timestamp=test_timestamp)) - self.assertEqual(2, len(result)) - expected = {"counter_name": "cpu_util", - "counter_type": "gauge", - "counter_unit": "%", - "counter_volume": 1, - "resource_id": "fake_uuid", - "timestamp": test_timestamp.isoformat()} - self.assertEqual(expected, result[0][0]) - samples_int = (parser.parse(result[0][-1]["timestamp"]) - - parser.parse(result[1][0]["timestamp"])).seconds - # NOTE(idegtiarov): here we check that interval between last sample in - # first batch and first sample in second batch is equal 60 sec. 
- self.assertEqual(60, samples_int) - - def test__make_timestamp_query(self): - start_time = "2015-09-09T00:00:00" - end_time = "2015-09-10T00:00:00" - expected_start = [ - {"field": "timestamp", "value": "2015-09-09T00:00:00", - "op": ">="}] - expected_end = [ - {"field": "timestamp", "value": "2015-09-10T00:00:00", - "op": "<="} - ] - - actual = self.scenario._make_timestamp_query(start_time, end_time) - self.assertEqual(expected_start + expected_end, actual) - self.assertRaises(exceptions.InvalidArgumentsException, - self.scenario._make_timestamp_query, - end_time, start_time) - self.assertEqual( - expected_start, - self.scenario._make_timestamp_query(start_time=start_time)) - self.assertEqual( - expected_end, - self.scenario._make_timestamp_query(end_time=end_time)) - - def test__list_alarms_by_id(self): - self.assertEqual(self.clients("ceilometer").alarms.get.return_value, - self.scenario._list_alarms("alarm-id")) - self.clients("ceilometer").alarms.get.assert_called_once_with( - "alarm-id") - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "ceilometer.list_alarms") - - def test__list_alarms(self): - self.assertEqual(self.clients("ceilometer").alarms.list.return_value, - self.scenario._list_alarms()) - self.clients("ceilometer").alarms.list.assert_called_once_with() - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "ceilometer.list_alarms") - - def test__get_alarm(self): - self.assertEqual(self.clients("ceilometer").alarms.get.return_value, - self.scenario._get_alarm("alarm-id")) - self.clients("ceilometer").alarms.get.assert_called_once_with( - "alarm-id") - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "ceilometer.get_alarm") - - def test__create_alarm(self): - alarm_dict = {"alarm_id": "fake-alarm-id"} - orig_alarm_dict = copy.copy(alarm_dict) - self.scenario.generate_random_name = mock.Mock() - self.assertEqual(self.scenario._create_alarm("fake-meter-name", 100, - alarm_dict), - self.clients("ceilometer").alarms.create.return_value) - self.clients("ceilometer").alarms.create.assert_called_once_with( - meter_name="fake-meter-name", - threshold=100, - description="Test Alarm", - alarm_id="fake-alarm-id", - name=self.scenario.generate_random_name.return_value) - # ensure that _create_alarm() doesn't modify the alarm dict as - # a side-effect - self.assertEqual(alarm_dict, orig_alarm_dict) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "ceilometer.create_alarm") - - def test__delete_alarms(self): - self.scenario._delete_alarm("alarm-id") - self.clients("ceilometer").alarms.delete.assert_called_once_with( - "alarm-id") - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "ceilometer.delete_alarm") - - def test__update_alarm(self): - alarm_diff = {"description": "Changed Test Description"} - orig_alarm_diff = copy.copy(alarm_diff) - self.scenario._update_alarm("alarm-id", alarm_diff) - self.clients("ceilometer").alarms.update.assert_called_once_with( - "alarm-id", **alarm_diff) - # ensure that _create_alarm() doesn't modify the alarm dict as - # a side-effect - self.assertEqual(alarm_diff, orig_alarm_diff) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "ceilometer.update_alarm") - - def test__get_alarm_history(self): - self.assertEqual( - self.scenario._get_alarm_history("alarm-id"), - self.clients("ceilometer").alarms.get_history.return_value) - self.clients("ceilometer").alarms.get_history.assert_called_once_with( - "alarm-id") - 
self._test_atomic_action_timer(self.scenario.atomic_actions(), - "ceilometer.get_alarm_history") - - def test__get_alarm_state(self): - self.assertEqual( - self.scenario._get_alarm_state("alarm-id"), - self.clients("ceilometer").alarms.get_state.return_value) - self.clients("ceilometer").alarms.get_state.assert_called_once_with( - "alarm-id") - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "ceilometer.get_alarm_state") - - def test__set_alarm_state(self): - alarm = mock.Mock() - self.clients("ceilometer").alarms.create.return_value = alarm - return_alarm = self.scenario._set_alarm_state(alarm, "ok", 100) - self.mock_wait_for_status.mock.assert_called_once_with( - alarm, - ready_statuses=["ok"], - update_resource=self.mock_get_from_manager.mock.return_value, - timeout=100, check_interval=1) - self.mock_get_from_manager.mock.assert_called_once_with() - self.assertEqual(self.mock_wait_for_status.mock.return_value, - return_alarm) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "ceilometer.set_alarm_state") - - def test__list_events(self): - self.assertEqual( - self.scenario._list_events(), - self.admin_clients("ceilometer").events.list.return_value - ) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "ceilometer.list_events") - - def test__get_events(self): - self.assertEqual( - self.scenario._get_event(event_id="fake_id"), - self.admin_clients("ceilometer").events.get.return_value - ) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "ceilometer.get_event") - - def test__list_event_types(self): - self.assertEqual( - self.scenario._list_event_types(), - self.admin_clients("ceilometer").event_types.list.return_value - ) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "ceilometer.list_event_types") - - def test__list_event_traits(self): - self.assertEqual( - self.scenario._list_event_traits( - event_type="fake_event_type", trait_name="fake_trait_name"), - self.admin_clients("ceilometer").traits.list.return_value - ) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "ceilometer.list_event_traits") - - def test__list_event_trait_descriptions(self): - self.assertEqual( - self.scenario._list_event_trait_descriptions( - event_type="fake_event_type" - ), - self.admin_clients("ceilometer").trait_descriptions.list. 
- return_value - ) - self._test_atomic_action_timer( - self.scenario.atomic_actions(), - "ceilometer.list_event_trait_descriptions") - - def test__list_meters(self): - self.assertEqual(self.scenario._list_meters(), - self.clients("ceilometer").meters.list.return_value) - self.clients("ceilometer").meters.list.assert_called_once_with( - q=None, limit=None) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "ceilometer.list_meters") - - def test__list_resources(self): - self.assertEqual( - self.scenario._list_resources(), - self.clients("ceilometer").resources.list.return_value) - self.clients("ceilometer").resources.list.assert_called_once_with( - q=None, limit=None) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "ceilometer.list_resources") - - def test__list_samples(self): - self.assertEqual( - self.scenario._list_samples(), - self.clients("ceilometer").new_samples.list.return_value) - self.clients("ceilometer").new_samples.list.assert_called_once_with( - q=None, limit=None) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "ceilometer.list_samples") - - def test__list_samples_with_query(self): - self.assertEqual( - self.scenario._list_samples(query=[{"field": "user_id", - "volume": "fake_id"}], - limit=10), - self.clients("ceilometer").new_samples.list.return_value) - self.clients("ceilometer").new_samples.list.assert_called_once_with( - q=[{"field": "user_id", "volume": "fake_id"}], limit=10) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "ceilometer.list_samples:limit&user_id") - - def test__get_resource(self): - self.assertEqual(self.scenario._get_resource("fake-resource-id"), - self.clients("ceilometer").resources.get.return_value) - self.clients("ceilometer").resources.get.assert_called_once_with( - "fake-resource-id") - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "ceilometer.get_resource") - - def test__get_stats(self): - self.assertEqual( - self.scenario._get_stats("fake-meter"), - self.clients("ceilometer").statistics.list.return_value) - self.clients("ceilometer").statistics.list.assert_called_once_with( - "fake-meter", q=None, period=None, groupby=None, aggregates=None) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "ceilometer.get_stats") - - def test__create_meter(self): - self.scenario.generate_random_name = mock.Mock() - self.assertEqual( - self.scenario._create_meter(fakearg="fakearg"), - self.clients("ceilometer").samples.create.return_value[0]) - self.clients("ceilometer").samples.create.assert_called_once_with( - counter_name=self.scenario.generate_random_name.return_value, - fakearg="fakearg") - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "ceilometer.create_meter") - - def test__query_alarms(self): - self.assertEqual( - self.scenario._query_alarms("fake-filter", "fake-orderby", 10), - self.clients("ceilometer").query_alarms.query.return_value) - self.clients("ceilometer").query_alarms.query.assert_called_once_with( - "fake-filter", "fake-orderby", 10) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "ceilometer.query_alarms") - - def test__query_alarm_history(self): - self.assertEqual( - self.scenario._query_alarm_history( - "fake-filter", "fake-orderby", 10), - self.clients("ceilometer").query_alarm_history.query.return_value) - self.clients( - "ceilometer").query_alarm_history.query.assert_called_once_with( - "fake-filter", "fake-orderby", 10) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - 
"ceilometer.query_alarm_history") - - def test__query_samples(self): - self.assertEqual( - self.scenario._query_samples("fake-filter", "fake-orderby", 10), - self.clients("ceilometer").query_samples.query.return_value) - self.clients("ceilometer").query_samples.query.assert_called_once_with( - "fake-filter", "fake-orderby", 10) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "ceilometer.query_samples") - - def test__create_sample_no_resource_id(self): - self.scenario.generate_random_name = mock.Mock() - created_sample = self.scenario._create_sample("test-counter-name", - "test-counter-type", - "test-counter-unit", - "test-counter-volume") - self.assertEqual( - created_sample, - self.clients("ceilometer").samples.create.return_value) - self.clients("ceilometer").samples.create.assert_called_once_with( - counter_name="test-counter-name", - counter_type="test-counter-type", - counter_unit="test-counter-unit", - counter_volume="test-counter-volume", - resource_id=self.scenario.generate_random_name.return_value) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "ceilometer.create_sample") - - def test__create_sample(self): - created_sample = self.scenario._create_sample("test-counter-name", - "test-counter-type", - "test-counter-unit", - "test-counter-volume", - "test-resource-id") - self.assertEqual( - created_sample, - self.clients("ceilometer").samples.create.return_value) - self.clients("ceilometer").samples.create.assert_called_once_with( - counter_name="test-counter-name", - counter_type="test-counter-type", - counter_unit="test-counter-unit", - counter_volume="test-counter-volume", - resource_id="test-resource-id") - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "ceilometer.create_sample") - - def test__make_general_query(self): - self.scenario.context = { - "user": {"tenant_id": "fake", "id": "fake_id"}, - "tenant": {"id": "fake_id", "resources": ["fake_resource"]}} - metadata = {"fake_field": "boo"} - expected = [ - {"field": "user_id", "value": "fake_id", "op": "eq"}, - {"field": "project_id", "value": "fake_id", "op": "eq"}, - {"field": "resource_id", "value": "fake_resource", "op": "eq"}, - {"field": "metadata.fake_field", "value": "boo", "op": "eq"}, - ] - - actual = self.scenario._make_general_query(True, True, True, metadata) - self.assertEqual(expected, actual) - - def test__make_query_item(self): - expected = {"field": "foo", "op": "eq", "value": "bar"} - self.assertEqual(expected, - self.scenario._make_query_item("foo", value="bar")) - - def test__make_profiler_key(self): - query = [ - {"field": "test_field1", "op": "eq", "value": "bar"}, - {"field": "test_field2", "op": "==", "value": None} - ] - limit = 100 - method = "fake_method" - actual = self.scenario._make_profiler_key(method, query, limit) - self.assertEqual("fake_method:limit&test_field1&test_field2", actual) - - actual = self.scenario._make_profiler_key(method, query, None) - self.assertEqual("fake_method:test_field1&test_field2", actual) - - self.assertEqual(method, - self.scenario._make_profiler_key(method, None, None)) diff --git a/tests/unit/plugins/openstack/scenarios/cinder/__init__.py b/tests/unit/plugins/openstack/scenarios/cinder/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/scenarios/cinder/test_qos_specs.py b/tests/unit/plugins/openstack/scenarios/cinder/test_qos_specs.py deleted file mode 100644 index 0e975ecbff..0000000000 --- 
a/tests/unit/plugins/openstack/scenarios/cinder/test_qos_specs.py +++ /dev/null @@ -1,135 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally import exceptions as rally_exceptions -from rally.plugins.openstack.scenarios.cinder import qos_specs -from tests.unit import test - - -class CinderQosTestCase(test.ScenarioTestCase): - - def setUp(self): - super(CinderQosTestCase, self).setUp() - patch = mock.patch( - "rally.plugins.openstack.services.storage.block.BlockStorage") - self.addCleanup(patch.stop) - self.mock_cinder = patch.start() - - def _get_context(self): - context = test.get_test_context() - context.update({ - "admin": { - "id": "fake_user_id", - "credential": mock.MagicMock() - }, - "user": {"id": "fake_user_id", - "credential": mock.MagicMock()}, - "tenant": {"id": "fake", "name": "fake"}}) - return context - - def test_create_and_list_qos(self): - mock_service = self.mock_cinder.return_value - qos = mock.MagicMock() - list_qos = [mock.MagicMock(), - mock.MagicMock(), - qos] - - specs = {"consumer": "both", - "write_iops_sec": "10", - "read_iops_sec": "1000"} - - scenario = qos_specs.CreateAndListQos(self._get_context()) - mock_service.create_qos.return_value = qos - mock_service.list_qos.return_value = list_qos - - scenario.run("both", "10", "1000") - mock_service.create_qos.assert_called_once_with(specs) - mock_service.list_qos.assert_called_once_with() - - def test_create_and_list_qos_with_fails(self): - mock_service = self.mock_cinder.return_value - qos = mock.MagicMock() - list_qos = [mock.MagicMock(), - mock.MagicMock(), - mock.MagicMock()] - specs = {"consumer": "both", - "write_iops_sec": "10", - "read_iops_sec": "1000"} - - scenario = qos_specs.CreateAndListQos(self._get_context()) - mock_service.create_qos.return_value = qos - mock_service.list_qos.return_value = list_qos - - self.assertRaises(rally_exceptions.RallyAssertionError, - scenario.run, "both", "10", "1000") - mock_service.create_qos.assert_called_once_with(specs) - mock_service.list_qos.assert_called_once_with() - - def test_create_and_get_qos(self): - mock_service = self.mock_cinder.return_value - qos = mock.MagicMock() - specs = {"consumer": "both", - "write_iops_sec": "10", - "read_iops_sec": "1000"} - - scenario = qos_specs.CreateAndGetQos(self._get_context()) - mock_service.create_qos.return_value = qos - - scenario.run("both", "10", "1000") - mock_service.create_qos.assert_called_once_with(specs) - mock_service.get_qos.assert_called_once_with(qos.id) - - def test_create_and_set_qos(self): - mock_service = self.mock_cinder.return_value - qos = mock.MagicMock() - create_specs_args = {"consumer": "back-end", - "write_iops_sec": "10", - "read_iops_sec": "1000"} - - set_specs_args = {"consumer": "both", - "write_iops_sec": "11", - "read_iops_sec": "1001"} - scenario = qos_specs.CreateAndSetQos(self._get_context()) - mock_service.create_qos.return_value = qos - - scenario.run("back-end", "10", "1000", - "both", "11", "1001") - 
mock_service.create_qos.assert_called_once_with(create_specs_args) - mock_service.set_qos.assert_called_once_with( - qos=qos, set_specs_args=set_specs_args) - - def test_create_qos_associate_and_disassociate_type(self): - mock_service = self.mock_cinder.return_value - context = self._get_context() - context.update({ - "volume_types": [{"id": "fake_id", - "name": "fake_name"}], - "iteration": 1}) - - qos = mock.MagicMock() - specs = {"consumer": "both", - "write_iops_sec": "10", - "read_iops_sec": "1000"} - - scenario = qos_specs.CreateQosAssociateAndDisassociateType(context) - mock_service.create_qos.return_value = qos - - scenario.run("both", "10", "1000") - mock_service.create_qos.assert_called_once_with(specs) - mock_service.qos_associate_type.assert_called_once_with( - qos_specs=qos, volume_type="fake_id") - mock_service.qos_disassociate_type.assert_called_once_with( - qos_specs=qos, volume_type="fake_id") diff --git a/tests/unit/plugins/openstack/scenarios/cinder/test_utils.py b/tests/unit/plugins/openstack/scenarios/cinder/test_utils.py deleted file mode 100644 index 63d4f7b10f..0000000000 --- a/tests/unit/plugins/openstack/scenarios/cinder/test_utils.py +++ /dev/null @@ -1,519 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from rally.common import cfg -from rally import exceptions -from rally.plugins.openstack import osclients -from rally.plugins.openstack.scenarios.cinder import utils -from tests.unit import fakes -from tests.unit import test - -CINDER_UTILS = "rally.plugins.openstack.scenarios.cinder.utils" -CONF = cfg.CONF - - -class CinderBasicTestCase(test.ScenarioTestCase): - - def _get_context(self): - context = test.get_test_context() - context.update({ - "admin": { - "id": "fake_user_id", - "credential": mock.MagicMock() - }, - "user": {"id": "fake_user_id", - "credential": mock.MagicMock()}, - "tenant": {"id": "fake", "name": "fake", - "volumes": [{"id": "uuid", "size": 1}], - "servers": [1]}}) - return context - - def setUp(self): - super(CinderBasicTestCase, self).setUp() - - @mock.patch("random.choice") - def test_get_random_server(self, mock_choice): - basic = utils.CinderBasic(self._get_context()) - server_id = mock_choice(basic.context["tenant"]["servers"]) - return_server = basic.get_random_server() - basic.clients("nova").servers.get.assert_called_once_with(server_id) - self.assertEqual(basic.clients("nova").servers.get.return_value, - return_server) - - -class CinderScenarioTestCase(test.ScenarioTestCase): - - def setUp(self): - super(CinderScenarioTestCase, self).setUp() - wrap = mock.patch("rally.plugins.openstack.wrappers.cinder.wrap") - self.mock_wrap = wrap.start() - self.addCleanup(self.mock_wrap.stop) - self.scenario = utils.CinderScenario( - self.context, - clients=osclients.Clients( - fakes.FakeUserContext.user["credential"])) - - def test__list_volumes(self): - return_volumes_list = self.scenario._list_volumes() - self.assertEqual(self.clients("cinder").volumes.list.return_value, - return_volumes_list) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "cinder.list_volumes") - - def test__list_types(self): - return_types_list = self.scenario._list_types() - self.assertEqual(self.clients("cinder").volume_types.list.return_value, - return_types_list) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "cinder.list_types") - - def test__get_volume(self): - volume = fakes.FakeVolume() - self.assertEqual(self.clients("cinder").volumes.get.return_value, - self.scenario._get_volume(volume.id)) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "cinder.get_volume") - - def test__list_snapshots(self): - return_snapshots_list = self.scenario._list_snapshots() - self.assertEqual( - self.clients("cinder").volume_snapshots.list.return_value, - return_snapshots_list) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "cinder.list_snapshots") - - def test__list_transfers(self): - return_transfers_list = self.scenario._list_transfers() - self.assertEqual( - self.clients("cinder").transfers.list.return_value, - return_transfers_list) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "cinder.list_transfers") - - def test__set_metadata(self): - volume = fakes.FakeVolume() - - self.scenario._set_metadata(volume, sets=2, set_size=4) - calls = self.clients("cinder").volumes.set_metadata.call_args_list - self.assertEqual(2, len(calls)) - for call in calls: - call_volume, metadata = call[0] - self.assertEqual(volume, call_volume) - self.assertEqual(4, len(metadata)) - - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "cinder.set_4_metadatas_2_times") - - def test__delete_metadata(self): - volume = fakes.FakeVolume() - - keys = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l"] - 
self.scenario._delete_metadata(volume, keys, deletes=3, delete_size=4) - calls = self.clients("cinder").volumes.delete_metadata.call_args_list - self.assertEqual(3, len(calls)) - all_deleted = [] - for call in calls: - call_volume, del_keys = call[0] - self.assertEqual(volume, call_volume) - self.assertEqual(4, len(del_keys)) - for key in del_keys: - self.assertIn(key, keys) - self.assertNotIn(key, all_deleted) - all_deleted.append(key) - - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "cinder.delete_4_metadatas_3_times") - - def test__delete_metadata_not_enough_keys(self): - volume = fakes.FakeVolume() - - keys = ["a", "b", "c", "d", "e"] - self.assertRaises(exceptions.InvalidArgumentsException, - self.scenario._delete_metadata, - volume, keys, deletes=2, delete_size=3) - - def test__create_volume(self): - return_volume = self.scenario._create_volume(1) - self.mock_wait_for_status.mock.assert_called_once_with( - self.mock_wrap.return_value.create_volume.return_value, - ready_statuses=["available"], - update_resource=self.mock_get_from_manager.mock.return_value, - timeout=CONF.openstack.cinder_volume_create_timeout, - check_interval=CONF.openstack.cinder_volume_create_poll_interval - ) - self.mock_get_from_manager.mock.assert_called_once_with() - self.assertEqual(self.mock_wait_for_status.mock.return_value, - return_volume) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "cinder.create_volume") - - @mock.patch("rally.plugins.openstack.scenarios.cinder.utils.random") - def test__create_volume_with_size_range(self, mock_random): - mock_random.randint.return_value = 3 - - return_volume = self.scenario._create_volume( - size={"min": 1, "max": 5}, - display_name="TestVolume") - - self.mock_wrap.return_value.create_volume.assert_called_once_with( - 3, display_name="TestVolume") - - self.mock_wait_for_status.mock.assert_called_once_with( - self.mock_wrap.return_value.create_volume.return_value, - ready_statuses=["available"], - update_resource=self.mock_get_from_manager.mock.return_value, - timeout=CONF.openstack.cinder_volume_create_timeout, - check_interval=CONF.openstack.cinder_volume_create_poll_interval - ) - self.mock_get_from_manager.mock.assert_called_once_with() - self.assertEqual(self.mock_wait_for_status.mock.return_value, - return_volume) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "cinder.create_volume") - - def test__update_volume(self): - fake_volume = mock.MagicMock() - volume_update_args = {"display_name": "_updated", - "display_description": "_updated"} - - self.scenario._update_volume(fake_volume, **volume_update_args) - self.mock_wrap.return_value.update_volume.assert_called_once_with( - fake_volume, - display_name="_updated", - display_description="_updated") - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "cinder.update_volume") - - def test__update_readonly_flag(self): - fake_volume = mock.MagicMock() - self.scenario._update_readonly_flag(fake_volume, "fake_flag") - self.clients( - "cinder").volumes.update_readonly_flag.assert_called_once_with( - fake_volume, "fake_flag") - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "cinder.update_readonly_flag") - - def test__delete_volume(self): - cinder = mock.Mock() - self.scenario._delete_volume(cinder) - cinder.delete.assert_called_once_with() - self.mock_wait_for_status.mock.assert_called_once_with( - cinder, - ready_statuses=["deleted"], - check_deletion=True, - update_resource=self.mock_get_from_manager.mock.return_value, - 
timeout=cfg.CONF.openstack.cinder_volume_create_timeout, - check_interval=cfg.CONF.openstack - .cinder_volume_create_poll_interval) - self.mock_get_from_manager.mock.assert_called_once_with() - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "cinder.delete_volume") - - @mock.patch("rally.plugins.openstack.scenarios.cinder.utils.random") - def test__extend_volume_with_size_range(self, mock_random): - volume = mock.Mock() - mock_random.randint.return_value = 3 - self.clients("cinder").volumes.extend.return_value = volume - - self.scenario._extend_volume(volume, new_size={"min": 1, "max": 5}) - - volume.extend.assert_called_once_with(volume, 3) - self.mock_wait_for_status.mock.assert_called_once_with( - volume, - ready_statuses=["available"], - update_resource=self.mock_get_from_manager.mock.return_value, - timeout=CONF.openstack.cinder_volume_create_timeout, - check_interval=CONF.openstack.cinder_volume_create_poll_interval - ) - self.mock_get_from_manager.mock.assert_called_once_with() - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "cinder.extend_volume") - - def test__extend_volume(self): - volume = mock.Mock() - self.clients("cinder").volumes.extend.return_value = volume - self.scenario._extend_volume(volume, 2) - self.mock_wait_for_status.mock.assert_called_once_with( - volume, - ready_statuses=["available"], - update_resource=self.mock_get_from_manager.mock.return_value, - timeout=CONF.openstack.cinder_volume_create_timeout, - check_interval=CONF.openstack.cinder_volume_create_poll_interval - ) - self.mock_get_from_manager.mock.assert_called_once_with() - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "cinder.extend_volume") - - @mock.patch("rally.plugins.openstack.wrappers.glance.wrap") - def test__upload_volume_to_image(self, mock_wrap): - volume = mock.Mock() - image = {"os-volume_upload_image": {"image_id": 1}} - volume.upload_to_image.return_value = (None, image) - self.clients("cinder").images.get.return_value = image - - self.scenario.generate_random_name = mock.Mock( - return_value="test_vol") - self.scenario._upload_volume_to_image(volume, False, - "container", "disk") - - volume.upload_to_image.assert_called_once_with(False, "test_vol", - "container", "disk") - self.mock_wait_for_status.mock.assert_has_calls([ - mock.call( - volume, - ready_statuses=["available"], - update_resource=self.mock_get_from_manager.mock.return_value, - timeout=CONF.openstack.cinder_volume_create_timeout, - check_interval=CONF.openstack. - cinder_volume_create_poll_interval), - mock.call( - self.clients("glance").images.get.return_value, - ready_statuses=["active"], - update_resource=mock_wrap.return_value.get_image, - timeout=CONF.openstack.glance_image_create_timeout, - check_interval=CONF.openstack. 
- glance_image_create_poll_interval) - ]) - self.mock_get_from_manager.mock.assert_called_once_with() - self.clients("glance").images.get.assert_called_once_with(1) - - def test__create_snapshot(self): - return_snapshot = self.scenario._create_snapshot("uuid", False) - - self.mock_wait_for_status.mock.assert_called_once_with( - self.mock_wrap.return_value.create_snapshot.return_value, - ready_statuses=["available"], - update_resource=self.mock_get_from_manager.mock.return_value, - timeout=cfg.CONF.openstack.cinder_volume_create_timeout, - check_interval=cfg.CONF.openstack - .cinder_volume_create_poll_interval) - self.mock_get_from_manager.mock.assert_called_once_with() - self.assertEqual(self.mock_wait_for_status.mock.return_value, - return_snapshot) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "cinder.create_snapshot") - - def test__delete_snapshot(self): - snapshot = mock.Mock() - self.scenario._delete_snapshot(snapshot) - snapshot.delete.assert_called_once_with() - self.mock_wait_for_status.mock.assert_called_once_with( - snapshot, - ready_statuses=["deleted"], - check_deletion=True, - update_resource=self.mock_get_from_manager.mock.return_value, - timeout=cfg.CONF.openstack.cinder_volume_create_timeout, - check_interval=cfg.CONF.openstack - .cinder_volume_create_poll_interval) - self.mock_get_from_manager.mock.assert_called_once_with() - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "cinder.delete_snapshot") - - def test__create_backup(self): - return_backup = self.scenario._create_backup("uuid") - - self.mock_wait_for_status.mock.assert_called_once_with( - self.clients("cinder").backups.create.return_value, - ready_statuses=["available"], - update_resource=self.mock_get_from_manager.mock.return_value, - timeout=cfg.CONF.openstack.cinder_volume_create_timeout, - check_interval=cfg.CONF.openstack - .cinder_volume_create_poll_interval) - self.mock_get_from_manager.mock.assert_called_once_with() - self.assertEqual(self.mock_wait_for_status.mock.return_value, - return_backup) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "cinder.create_backup") - - def test__delete_backup(self): - backup = mock.Mock() - self.scenario._delete_backup(backup) - backup.delete.assert_called_once_with() - self.mock_wait_for_status.mock.assert_called_once_with( - backup, - ready_statuses=["deleted"], - check_deletion=True, - update_resource=self.mock_get_from_manager.mock.return_value, - timeout=cfg.CONF.openstack.cinder_volume_create_timeout, - check_interval=cfg.CONF.openstack - .cinder_volume_create_poll_interval) - self.mock_get_from_manager.mock.assert_called_once_with() - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "cinder.delete_backup") - - def test__restore_backup(self): - # NOTE(mdovgal): added for pep8 visual indent test passing - bench_cfg = cfg.CONF.openstack - - backup = mock.Mock() - restore = mock.Mock() - self.clients("cinder").restores.restore.return_value = backup - self.clients("cinder").backups.get.return_value = backup - self.clients("cinder").volumes.get.return_value = restore - - return_restore = self.scenario._restore_backup(backup.id, None) - self.mock_wait_for_status.mock.assert_has_calls([ - mock.call( - backup, - ready_statuses=["available"], - update_resource=self.mock_get_from_manager.mock.return_value, - timeout=bench_cfg.cinder_backup_restore_timeout, - check_interval=bench_cfg.cinder_backup_restore_poll_interval), - mock.call( - restore, - ready_statuses=["available"], - 
update_resource=self.mock_get_from_manager.mock.return_value, - timeout=bench_cfg.cinder_volume_create_timeout, - check_interval=bench_cfg.cinder_volume_create_poll_interval) - ]) - - self.mock_get_from_manager.mock.assert_has_calls([mock.call(), - mock.call()]) - self.assertEqual(self.mock_wait_for_status.mock.return_value, - return_restore) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "cinder.restore_backup") - - def test__list_backups(self): - return_backups_list = self.scenario._list_backups() - self.assertEqual( - self.clients("cinder").backups.list.return_value, - return_backups_list) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "cinder.list_backups") - - def test__get_random_server(self): - servers = [1, 2, 3] - context = {"user": {"tenant_id": "fake"}, - "users": [{"tenant_id": "fake", - "users_per_tenant": 1}], - "tenant": {"id": "fake", "servers": servers}} - self.scenario.context = context - self.scenario.clients = mock.Mock() - self.scenario.clients("nova").servers.get = mock.Mock( - side_effect=lambda arg: arg) - - server_id = self.scenario.get_random_server() - - self.assertIn(server_id, servers) - - def test__create_volume_type(self, **kwargs): - random_name = "random_name" - self.scenario.generate_random_name = mock.Mock( - return_value=random_name) - - result = self.scenario._create_volume_type() - - self.assertEqual( - self.admin_clients("cinder").volume_types.create.return_value, - result) - admin_clients = self.admin_clients("cinder") - admin_clients.volume_types.create.assert_called_once_with( - name="random_name") - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "cinder.create_volume_type") - - def test__delete_encryption_type(self): - volume_type = mock.Mock() - - self.assertRaises(exceptions.RallyException, - self.scenario._delete_encryption_type, - volume_type) - - def test__create_encryption_type(self): - volume_type = mock.Mock() - specs = { - "provider": "foo_pro", - "cipher": "foo_cip", - "key_size": 512, - "control_location": "foo_con" - } - result = self.scenario._create_encryption_type(volume_type, specs) - - self.assertEqual( - self.admin_clients( - "cinder").volume_encryption_types.create.return_value, result) - self.admin_clients( - "cinder").volume_encryption_types.create.assert_called_once_with( - volume_type, specs) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "cinder.create_encryption_type") - - def test__list_encryption_type(self): - return_encryption_types_list = self.scenario._list_encryption_type() - client = self.admin_clients("cinder") - self.assertEqual(client.volume_encryption_types.list.return_value, - return_encryption_types_list) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "cinder.list_encryption_type") - - def test__get_volume_type(self): - volume_type = mock.Mock() - result = self.scenario._get_volume_type(volume_type) - self.assertEqual( - self.admin_clients("cinder").volume_types.get.return_value, - result) - - self.admin_clients("cinder").volume_types.get.assert_called_once_with( - volume_type) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "cinder.get_volume_type") - - def test__delete_volume_type(self): - volume_type = mock.Mock() - self.scenario._delete_volume_type(volume_type) - admin_clients = self.admin_clients("cinder") - admin_clients.volume_types.delete.assert_called_once_with( - volume_type) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "cinder.delete_volume_type") - - def 
test__transfer_create(self): - fake_volume = mock.MagicMock() - random_name = "random_name" - self.scenario.generate_random_name = mock.MagicMock( - return_value=random_name) - result = self.scenario._transfer_create(fake_volume.id) - self.assertEqual( - self.clients("cinder").transfers.create.return_value, - result) - self.clients("cinder").transfers.create.assert_called_once_with( - fake_volume.id, random_name) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "cinder.transfer_create") - - def test__transfer_accept(self): - fake_transfer = mock.MagicMock() - result = self.scenario._transfer_accept(fake_transfer.id, "fake_key") - self.assertEqual( - self.clients("cinder").transfers.accept.return_value, - result) - self.clients("cinder").transfers.accept.assert_called_once_with( - fake_transfer.id, "fake_key") - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "cinder.transfer_accept") - - def test__set_volume_type_keys(self): - volume_type = mock.MagicMock() - volume_type.set_keys = mock.MagicMock() - volume_type_key = {"volume_backend_name": "LVM_iSCSI"} - result = self.scenario._set_volume_type_keys(volume_type, - volume_type_key) - self.assertEqual(volume_type.set_keys.return_value, result) - volume_type.set_keys.assert_called_once_with(volume_type_key) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "cinder.set_volume_type_keys") diff --git a/tests/unit/plugins/openstack/scenarios/cinder/test_volume_backups.py b/tests/unit/plugins/openstack/scenarios/cinder/test_volume_backups.py deleted file mode 100644 index f49daa814f..0000000000 --- a/tests/unit/plugins/openstack/scenarios/cinder/test_volume_backups.py +++ /dev/null @@ -1,58 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from rally.plugins.openstack.scenarios.cinder import volume_backups -from tests.unit import test - - -class CinderBackupTestCase(test.ScenarioTestCase): - - def setUp(self): - super(CinderBackupTestCase, self).setUp() - patch = mock.patch( - "rally.plugins.openstack.services.storage.block.BlockStorage") - self.addCleanup(patch.stop) - self.mock_cinder = patch.start() - - def _get_context(self): - context = test.get_test_context() - context.update({ - "admin": { - "id": "fake_user_id", - "credential": mock.MagicMock() - }, - "user": {"id": "fake_user_id", - "credential": mock.MagicMock()}, - "tenant": {"id": "fake", "name": "fake"}}) - return context - - def test_create_incremental_volume_backup(self): - mock_service = self.mock_cinder.return_value - scenario = volume_backups.CreateIncrementalVolumeBackup( - self._get_context()) - - volume_kwargs = {"some_var": "zaq"} - backup_kwargs = {"incremental": True} - - scenario.run(1, do_delete=True, create_volume_kwargs=volume_kwargs, - create_backup_kwargs=backup_kwargs) - - self.assertEqual(2, mock_service.create_backup.call_count) - mock_service.create_volume.assert_called_once_with(1, **volume_kwargs) - mock_service.delete_backup.assert_has_calls( - mock_service.create_backup.return_value) - mock_service.delete_volume.assert_called_once_with( - mock_service.create_volume.return_value) diff --git a/tests/unit/plugins/openstack/scenarios/cinder/test_volume_types.py b/tests/unit/plugins/openstack/scenarios/cinder/test_volume_types.py deleted file mode 100644 index f881ce884b..0000000000 --- a/tests/unit/plugins/openstack/scenarios/cinder/test_volume_types.py +++ /dev/null @@ -1,310 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from rally import exceptions as rally_exceptions -from rally.plugins.openstack.scenarios.cinder import volume_types -from tests.unit import test - -CINDER_V2_PATH = ("rally.plugins.openstack.services.storage" - ".cinder_v2.CinderV2Service") - - -class CinderVolumeTypesTestCase(test.ScenarioTestCase): - - def setUp(self): - super(CinderVolumeTypesTestCase, self).setUp() - patch = mock.patch( - "rally.plugins.openstack.services.storage.block.BlockStorage") - self.addCleanup(patch.stop) - self.mock_cinder = patch.start() - - def _get_context(self): - context = test.get_test_context() - context.update({ - "admin": { - "id": "fake_user_id", - "credential": mock.MagicMock() - }, - "user": {"id": "fake_user_id", - "credential": mock.MagicMock()}, - "tenant": {"id": "fake", "name": "fake"}}) - return context - - def test_create_and_get_volume_type(self): - mock_service = self.mock_cinder.return_value - scenario = volume_types.CreateAndGetVolumeType(self._get_context()) - description = "rally tests creating types" - is_public = False - scenario.run(description=description, is_public=is_public) - mock_service.create_volume_type.assert_called_once_with( - description=description, is_public=is_public) - mock_service.get_volume_type.assert_called_once_with( - mock_service.create_volume_type.return_value) - - def test_create_and_delete_volume_type(self): - mock_service = self.mock_cinder.return_value - scenario = volume_types.CreateAndDeleteVolumeType(self._get_context()) - description = "rally tests creating types" - is_public = False - scenario.run(description=description, is_public=is_public) - mock_service.create_volume_type.assert_called_once_with( - description=description, is_public=is_public) - mock_service.delete_volume_type.assert_called_once_with( - mock_service.create_volume_type.return_value) - - def test_create_and_delete_encryption_type(self): - mock_service = self.mock_cinder.return_value - context = self._get_context() - context.update({ - "volume_types": [{"id": "fake_id", - "name": "fake_name"}], - "iteration": 1}) - scenario = volume_types.CreateAndDeleteEncryptionType( - context) - - # case: create_specs is None - specs = { - "provider": "prov", - "cipher": "cip", - "key_size": "ks", - "control_location": "cl" - } - scenario.run(create_specs=None, provider="prov", cipher="cip", - key_size="ks", control_location="cl") - mock_service.create_encryption_type.assert_called_once_with( - "fake_id", specs=specs) - mock_service.delete_encryption_type.assert_called_once_with( - "fake_id") - - # case: create_specs is not None - scenario.run(create_specs="fakecreatespecs", provider="prov", - cipher="cip", key_size="ks", control_location="cl") - mock_service.create_encryption_type.assert_called_with( - "fake_id", specs="fakecreatespecs") - mock_service.delete_encryption_type.assert_called_with( - "fake_id") - - def test_create_get_and_delete_encryption_type(self): - mock_service = self.mock_cinder.return_value - context = self._get_context() - context.update({ - "volume_types": [{"id": "fake_id", - "name": "fake_name"}], - "iteration": 1}) - scenario = volume_types.CreateGetAndDeleteEncryptionType( - context) - - specs = { - "provider": "prov", - "cipher": "cip", - "key_size": "ks", - "control_location": "cl" - } - scenario.run(provider="prov", cipher="cip", - key_size="ks", control_location="cl") - mock_service.create_encryption_type.assert_called_once_with( - "fake_id", specs=specs) - mock_service.get_encryption_type.assert_called_once_with( - "fake_id") - 
mock_service.delete_encryption_type.assert_called_once_with( - "fake_id") - - def test_create_and_list_volume_types(self): - mock_service = self.mock_cinder.return_value - fake_type = mock.Mock() - pool_list = [mock.Mock(), mock.Mock(), fake_type] - description = "rally tests creating types" - is_public = False - - scenario = volume_types.CreateAndListVolumeTypes(self._get_context()) - mock_service.create_volume_type.return_value = fake_type - mock_service.list_types.return_value = pool_list - scenario.run(description=description, is_public=is_public) - - mock_service.create_volume_type.assert_called_once_with( - description=description, is_public=is_public) - mock_service.list_types.assert_called_once_with() - - def test_create_and_list_volume_types_with_fails(self): - # Negative case: type isn't listed - mock_service = self.mock_cinder.return_value - fake_type = mock.Mock() - pool_list = [mock.Mock(), mock.Mock(), mock.Mock()] - description = "rally tests creating types" - is_public = False - - scenario = volume_types.CreateAndListVolumeTypes(self._get_context()) - mock_service.create_volume_type.return_value = fake_type - mock_service.list_types.return_value = pool_list - self.assertRaises(rally_exceptions.RallyAssertionError, - scenario.run, - description=description, is_public=is_public) - - mock_service.create_volume_type.assert_called_once_with( - description=description, is_public=is_public) - mock_service.list_types.assert_called_once_with() - - @mock.patch("%s.create_volume_type" % CINDER_V2_PATH) - @mock.patch("%s.update_volume_type" % CINDER_V2_PATH) - def test_create_and_update_volume_type(self, mock_update_volume_type, - mock_create_volume_type): - scenario = volume_types.CreateAndUpdateVolumeType(self._get_context()) - fake_type = mock.MagicMock() - fake_type.name = "any" - create_description = "test create" - update_description = "test update" - mock_create_volume_type.return_value = fake_type - scenario.run(description=create_description, - update_description=update_description) - - mock_create_volume_type.assert_called_once_with( - description=create_description, - is_public=True) - mock_update_volume_type.assert_called_once_with( - fake_type, name="any", - description=update_description, - is_public=None) - - def test_create_volume_type_and_encryption_type(self): - mock_service = self.mock_cinder.return_value - scenario = volume_types.CreateVolumeTypeAndEncryptionType( - self._get_context()) - description = "rally tests creating types" - is_public = False - # case: create_specs is None - specs = { - "provider": "prov", - "cipher": "cip", - "key_size": "ks", - "control_location": "cl" - } - scenario.run(create_specs=None, provider="prov", cipher="cip", - key_size="ks", control_location="cl", - description=description, is_public=is_public) - mock_service.create_volume_type.assert_called_once_with( - description=description, is_public=is_public) - mock_service.create_encryption_type.assert_called_once_with( - mock_service.create_volume_type.return_value, specs=specs) - - # case: create_specs is not None - scenario.run(create_specs="fakecreatespecs", provider="prov", - cipher="cip", key_size="ks", control_location="cl", - description=description, is_public=is_public) - mock_service.create_volume_type.assert_called_with( - description=description, is_public=is_public) - mock_service.create_encryption_type.assert_called_with( - mock_service.create_volume_type.return_value, - specs="fakecreatespecs") - - def test_create_and_list_encryption_type(self): - mock_service = 
self.mock_cinder.return_value - context = self._get_context() - context.update({ - "volume_types": [{"id": "fake_id", - "name": "fake_name"}], - "iteration": 1}) - scenario = volume_types.CreateAndListEncryptionType( - context) - - # case: create_specs is None - specs = { - "provider": "prov", - "cipher": "cip", - "key_size": "ks", - "control_location": "cl" - } - scenario.run(create_specs=None, provider="prov", cipher="cip", - key_size="ks", control_location="cl", - search_opts="fakeopts") - mock_service.create_encryption_type.assert_called_once_with( - "fake_id", specs=specs) - mock_service.list_encryption_type.assert_called_once_with( - "fakeopts") - - # case: create_specs is not None - scenario.run(create_specs="fakecreatespecs", provider="prov", - cipher="cip", key_size="ks", control_location="cl", - search_opts="fakeopts") - mock_service.create_encryption_type.assert_called_with( - "fake_id", specs="fakecreatespecs") - mock_service.list_encryption_type.assert_called_with( - "fakeopts") - - def test_create_and_set_volume_type_keys(self): - mock_service = self.mock_cinder.return_value - volume_type_key = {"volume_backend_name": "LVM_iSCSI"} - description = "rally tests creating types" - is_public = False - scenario = volume_types.CreateAndSetVolumeTypeKeys( - self._get_context()) - scenario.run(volume_type_key, description=description, - is_public=is_public) - - mock_service.create_volume_type.assert_called_once_with( - description=description, is_public=is_public) - mock_service.set_volume_type_keys.assert_called_once_with( - mock_service.create_volume_type.return_value, - metadata=volume_type_key) - - def test_create_and_update_encryption_type(self): - mock_service = self.mock_cinder.return_value - context = self._get_context() - context.update({ - "volume_types": [{"id": "fake_id", - "name": "fake_name"}], - "iteration": 1}) - scenario = volume_types.CreateAndUpdateEncryptionType( - context) - - create_specs = { - "provider": "create_prov", - "cipher": "create_cip", - "key_size": "create_ks", - "control_location": "create_cl" - } - update_specs = { - "provider": "update_prov", - "cipher": "update_cip", - "key_size": "update_ks", - "control_location": "update_cl" - } - scenario.run(create_provider="create_prov", create_cipher="create_cip", - create_key_size="create_ks", - create_control_location="create_cl", - update_provider="update_prov", update_cipher="update_cip", - update_key_size="update_ks", - update_control_location="update_cl") - mock_service.create_encryption_type.assert_called_once_with( - "fake_id", specs=create_specs) - mock_service.update_encryption_type.assert_called_once_with( - "fake_id", specs=update_specs) - - @mock.patch("%s.list_type_access" % CINDER_V2_PATH) - @mock.patch("%s.add_type_access" % CINDER_V2_PATH) - @mock.patch("%s.create_volume_type" % CINDER_V2_PATH) - def test_create_volume_type_add_and_list_type_access( - self, mock_create_volume_type, mock_add_type_access, - mock_list_type_access): - scenario = volume_types.CreateVolumeTypeAddAndListTypeAccess( - self._get_context()) - fake_type = mock.Mock() - mock_create_volume_type.return_value = fake_type - - scenario.run(description=None, is_public=False) - mock_create_volume_type.assert_called_once_with( - description=None, is_public=False) - mock_add_type_access.assert_called_once_with(fake_type, project="fake") - mock_list_type_access.assert_called_once_with(fake_type) diff --git a/tests/unit/plugins/openstack/scenarios/cinder/test_volumes.py 
b/tests/unit/plugins/openstack/scenarios/cinder/test_volumes.py deleted file mode 100644 index a3a49329c1..0000000000 --- a/tests/unit/plugins/openstack/scenarios/cinder/test_volumes.py +++ /dev/null @@ -1,549 +0,0 @@ -# Copyright 2013 Huawei Technologies Co.,LTD. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import mock - -from rally.plugins.openstack.scenarios.cinder import volumes -from tests.unit import test - -CINDER_VOLUMES = ("rally.plugins.openstack.scenarios.cinder.volumes") - - -@ddt.ddt -class CinderServersTestCase(test.ScenarioTestCase): - - def _get_context(self): - context = test.get_test_context() - context.update({ - "admin": { - "id": "fake_user_id", - "credential": mock.MagicMock() - }, - "user": {"id": "fake_user_id", - "credential": mock.MagicMock()}, - "tenant": {"id": "fake", "name": "fake", - "volumes": [{"id": "uuid", "size": 1}], - "servers": [1]}}) - return context - - def setUp(self): - super(CinderServersTestCase, self).setUp() - patch = mock.patch( - "rally.plugins.openstack.services.storage.block.BlockStorage") - self.addCleanup(patch.stop) - self.mock_cinder = patch.start() - - def test_create_and_list_volume(self): - mock_service = self.mock_cinder.return_value - scenario = volumes.CreateAndListVolume(self._get_context()) - scenario.run(1, True, fakearg="f") - - mock_service.create_volume.assert_called_once_with(1, fakearg="f") - mock_service.list_volumes.assert_called_once_with(True) - - def test_create_and_get_volume(self): - mock_service = self.mock_cinder.return_value - scenario = volumes.CreateAndGetVolume(self._get_context()) - scenario.run(1, fakearg="f") - mock_service.create_volume.assert_called_once_with(1, fakearg="f") - mock_service.get_volume.assert_called_once_with( - mock_service.create_volume.return_value.id) - - def test_list_volumes(self): - mock_service = self.mock_cinder.return_value - scenario = volumes.ListVolumes(self._get_context()) - scenario.run(True) - mock_service.list_volumes.assert_called_once_with(True) - - def test_list_types(self): - mock_service = self.mock_cinder.return_value - scenario = volumes.ListTypes(self._get_context()) - scenario.run(None, is_public=None) - mock_service.list_types.assert_called_once_with(None, - is_public=None) - - def test_list_transfers(self): - mock_service = self.mock_cinder.return_value - scenario = volumes.ListTransfers(self._get_context()) - scenario._list_transfers = mock.MagicMock() - scenario.run(True, search_opts=None) - mock_service.list_transfers.assert_called_once_with( - True, search_opts=None) - - @ddt.data({"update_args": {"description": "desp"}, - "expected": {"description": "desp"}}, - {"update_args": {"update_name": True, "description": "desp"}, - "expected": {"name": "new_name", "description": "desp"}}) - @ddt.unpack - def test_create_and_update_volume(self, update_args, expected): - mock_service = self.mock_cinder.return_value - scenario = volumes.CreateAndUpdateVolume(self._get_context()) - scenario.generate_random_name = mock.MagicMock() - 
scenario.generate_random_name.return_value = "new_name" - scenario.run(1, update_volume_kwargs=update_args) - mock_service.create_volume.assert_called_once_with(1) - mock_service.update_volume.assert_called_once_with( - mock_service.create_volume.return_value, **expected) - if update_args.get("update_name", False): - scenario.generate_random_name.assert_called_once_with() - - def test_create_volume_and_update_readonly_flag(self): - mock_service = self.mock_cinder.return_value - scenario = volumes.CreateVolumeAndUpdateReadonlyFlag( - self._get_context()) - scenario.run(1, image=None, read_only=True, fakearg="f") - mock_service.create_volume.assert_called_once_with(1, fakearg="f") - mock_service.update_readonly_flag.assert_called_once_with( - mock_service.create_volume.return_value.id, read_only=True) - - def test_create_and_delete_volume(self): - mock_service = self.mock_cinder.return_value - scenario = volumes.CreateAndDeleteVolume(self._get_context()) - scenario.sleep_between = mock.MagicMock() - scenario.run(size=1, min_sleep=10, max_sleep=20, fakearg="f") - - mock_service.create_volume.assert_called_once_with(1, fakearg="f") - scenario.sleep_between.assert_called_once_with(10, 20) - mock_service.delete_volume.assert_called_once_with( - mock_service.create_volume.return_value) - - def test_create_volume(self): - mock_service = self.mock_cinder.return_value - scenario = volumes.CreateVolume(self._get_context()) - scenario.run(1, fakearg="f") - mock_service.create_volume.assert_called_once_with(1, fakearg="f") - - def test_create_volume_and_modify_metadata(self): - mock_service = self.mock_cinder.return_value - scenario = volumes.ModifyVolumeMetadata(self._get_context()) - scenario.run(sets=5, set_size=4, deletes=3, delete_size=2) - mock_service.set_metadata.assert_called_once_with( - "uuid", set_size=4, sets=5) - mock_service.delete_metadata.assert_called_once_with( - "uuid", - keys=mock_service.set_metadata.return_value, - deletes=3, delete_size=2) - - def test_create_and_extend_volume(self): - mock_service = self.mock_cinder.return_value - - scenario = volumes.CreateAndExtendVolume(self._get_context()) - scenario.sleep_between = mock.MagicMock() - - scenario.run(1, 2, 10, 20, fakearg="f") - mock_service.create_volume.assert_called_once_with(1, fakearg="f") - mock_service.extend_volume.assert_called_once_with( - mock_service.create_volume.return_value, new_size=2) - scenario.sleep_between.assert_called_once_with(10, 20) - mock_service.delete_volume.assert_called_once_with( - mock_service.create_volume.return_value) - - def test_create_from_image_and_delete_volume(self): - mock_service = self.mock_cinder.return_value - scenario = volumes.CreateAndDeleteVolume(self._get_context()) - scenario.run(1, image="fake_image") - mock_service.create_volume.assert_called_once_with( - 1, imageRef="fake_image") - mock_service.delete_volume.assert_called_once_with( - mock_service.create_volume.return_value) - - def test_create_volume_from_image(self): - mock_service = self.mock_cinder.return_value - scenario = volumes.CreateVolume(self._get_context()) - scenario.run(1, image="fake_image") - mock_service.create_volume.assert_called_once_with( - 1, imageRef="fake_image") - - def test_create_volume_from_image_and_list(self): - mock_service = self.mock_cinder.return_value - scenario = volumes.CreateAndListVolume(self._get_context()) - scenario.run(1, True, "fake_image") - mock_service.create_volume.assert_called_once_with( - 1, imageRef="fake_image") - mock_service.list_volumes.assert_called_once_with(True) 
- - def test_create_from_volume_and_delete_volume(self): - mock_service = self.mock_cinder.return_value - vol_size = 1 - scenario = volumes.CreateFromVolumeAndDeleteVolume(self._get_context()) - scenario.run(vol_size) - mock_service.create_volume.assert_called_once_with( - 1, source_volid="uuid") - mock_service.delete_volume.assert_called_once_with( - mock_service.create_volume.return_value) - - @mock.patch("%s.CreateAndDeleteSnapshot.sleep_between" % CINDER_VOLUMES) - def test_create_and_delete_snapshot(self, mock_sleep_between): - mock_service = self.mock_cinder.return_value - scenario = volumes.CreateAndDeleteSnapshot(self._get_context()) - scenario.run(False, 10, 20, fakearg="f") - - mock_service.create_snapshot.assert_called_once_with("uuid", - force=False, - fakearg="f") - mock_sleep_between.assert_called_once_with(10, 20) - mock_service.delete_snapshot.assert_called_once_with( - mock_service.create_snapshot.return_value) - - def test_create_and_list_snapshots(self): - mock_service = self.mock_cinder.return_value - scenario = volumes.CreateAndListSnapshots(self._get_context()) - scenario.run(False, True, fakearg="f") - mock_service.create_snapshot.assert_called_once_with("uuid", - force=False, - fakearg="f") - mock_service.list_snapshots.assert_called_once_with(True) - - def test_create_and_attach_volume(self): - fake_server = mock.MagicMock() - mock_service = self.mock_cinder.return_value - scenario = volumes.CreateAndAttachVolume(self._get_context()) - - scenario._boot_server = mock.MagicMock(return_value=fake_server) - scenario._delete_server = mock.MagicMock() - scenario._attach_volume = mock.MagicMock() - scenario._detach_volume = mock.MagicMock() - - volume_args = {"some_key": "some_val"} - vm_args = {"some_key": "some_val"} - - scenario.run(10, "img", "0", - create_volume_params=volume_args, - create_vm_params=vm_args) - - mock_service.create_volume.assert_called_once_with( - 10, **volume_args) - scenario._attach_volume.assert_called_once_with( - fake_server, mock_service.create_volume.return_value) - scenario._detach_volume.assert_called_once_with( - fake_server, mock_service.create_volume.return_value) - - mock_service.delete_volume.assert_called_once_with( - mock_service.create_volume.return_value) - scenario._delete_server.assert_called_once_with(fake_server) - - @mock.patch("rally.plugins.openstack.services.image.image.Image") - def test_create_and_upload_volume_to_image(self, mock_image): - mock_volume_service = self.mock_cinder.return_value - mock_image_service = mock_image.return_value - scenario = volumes.CreateAndUploadVolumeToImage(self._get_context()) - - scenario.run(2, image="img", container_format="fake", - disk_format="disk", do_delete=False, fakeargs="fakeargs") - - mock_volume_service.create_volume.assert_called_once_with( - 2, imageRef="img", fakeargs="fakeargs") - mock_volume_service.upload_volume_to_image.assert_called_once_with( - mock_volume_service.create_volume.return_value, - container_format="fake", disk_format="disk", force=False) - - mock_volume_service.create_volume.reset_mock() - mock_volume_service.upload_volume_to_image.reset_mock() - - scenario.run(1, image=None, do_delete=True, fakeargs="fakeargs") - - mock_volume_service.create_volume.assert_called_once_with( - 1, fakeargs="fakeargs") - mock_volume_service.upload_volume_to_image.assert_called_once_with( - mock_volume_service.create_volume.return_value, - container_format="bare", disk_format="raw", force=False) - mock_volume_service.delete_volume.assert_called_once_with( - 
mock_volume_service.create_volume.return_value) - mock_image_service.delete_image.assert_called_once_with( - mock_volume_service.upload_volume_to_image.return_value.id) - - def test_create_snapshot_and_attach_volume(self): - mock_service = self.mock_cinder.return_value - scenario = volumes.CreateSnapshotAndAttachVolume(self._get_context()) - scenario._boot_server = mock.MagicMock() - scenario._attach_volume = mock.MagicMock() - scenario._detach_volume = mock.MagicMock() - scenario.run("img", "flavor") - - self.assertTrue(mock_service.create_volume.called) - volume = mock_service.create_volume.return_value - snapshot = mock_service.create_snapshot.return_value - mock_service.create_snapshot.assert_called_once_with(volume.id, - force=False) - mock_service.delete_snapshot.assert_called_once_with(snapshot) - scenario._attach_volume.assert_called_once_with( - scenario._boot_server.return_value, volume) - scenario._detach_volume.assert_called_once_with( - scenario._boot_server.return_value, volume) - mock_service.delete_volume.assert_called_once_with(volume) - - @mock.patch("random.choice") - def test_create_snapshot_and_attach_volume_use_volume_type_with_name( - self, mock_choice): - mock_service = self.mock_cinder.return_value - - scenario = volumes.CreateSnapshotAndAttachVolume(self._get_context()) - scenario._boot_server = mock.MagicMock() - scenario._attach_volume = mock.MagicMock() - scenario._detach_volume = mock.MagicMock() - scenario.run("img", "flavor", volume_type="type") - - fake_volume = mock_service.create_volume.return_value - fake_server = scenario._boot_server.return_value - fake_snapshot = mock_service.create_snapshot.return_value - - mock_service.create_volume.assert_called_once_with( - {"min": 1, "max": 5}, volume_type="type") - mock_service.create_snapshot.assert_called_once_with(fake_volume.id, - force=False) - mock_service.delete_snapshot.assert_called_once_with(fake_snapshot) - scenario._attach_volume.assert_called_once_with(fake_server, - fake_volume) - scenario._detach_volume.assert_called_once_with(fake_server, - fake_volume) - mock_service.delete_volume.assert_called_once_with(fake_volume) - - @mock.patch("random.randint") - def test_create_nested_snapshots_and_attach_volume(self, mock_randint): - mock_service = self.mock_cinder.return_value - mock_randint.return_value = 2 - volume_kwargs = {"volume_type": "type1"} - snapshot_kwargs = {"name": "snapshot1", "description": "snaphot one"} - - scenario = volumes.CreateNestedSnapshotsAndAttachVolume( - context=self._get_context()) - scenario._boot_server = mock.MagicMock() - scenario._attach_volume = mock.MagicMock() - scenario._detach_volume = mock.MagicMock() - scenario.run("img", "flavor", create_volume_kwargs=volume_kwargs, - create_snapshot_kwargs=snapshot_kwargs) - - mock_service.create_volume.assert_called_once_with( - mock_randint.return_value, **volume_kwargs) - mock_service.create_snapshot.assert_called_once_with( - mock_service.create_volume.return_value.id, force=False, - **snapshot_kwargs) - scenario._attach_volume(scenario._boot_server.return_value, - mock_service.create_volume.return_value) - mock_service.delete_volume.assert_called_once_with( - mock_service.create_volume.return_value) - mock_service.delete_snapshot.assert_called_once_with( - mock_service.create_snapshot.return_value) - scenario._detach_volume.assert_called_once_with( - scenario._boot_server.return_value, - mock_service.create_volume.return_value) - - @mock.patch("random.randint") - def test_create_nested_snapshots_and_attach_volume_2(self, 
mock_randint): - mock_service = self.mock_cinder.return_value - mock_randint.return_value = 2 - nested_level = 3 - volume_size = mock_randint.return_value - fake_volumes = [mock.Mock(size=volume_size) - for i in range(nested_level)] - fake_snapshots = [mock.Mock() - for i in range(nested_level)] - mock_service.create_volume.side_effect = fake_volumes - mock_service.create_snapshot.side_effect = fake_snapshots - - scenario = volumes.CreateNestedSnapshotsAndAttachVolume( - context=self._get_context()) - scenario._boot_server = mock.MagicMock() - scenario._attach_volume = mock.MagicMock() - scenario._detach_volume = mock.MagicMock() - scenario.run("img", "flavor", nested_level=nested_level) - - expected_volumes = [mock.call(volume_size)] - expected_snapshots = [mock.call(fake_volumes[0].id, force=False)] - expected_attachs = [mock.call(scenario._boot_server.return_value, - fake_volumes[0])] - for i in range(nested_level - 1): - expected_volumes.append( - mock.call(volume_size, snapshot_id=fake_snapshots[i].id)) - expected_snapshots.append( - mock.call(fake_volumes[i + 1].id, force=False)) - expected_attachs.append( - mock.call(scenario._boot_server.return_value, - fake_volumes[i + 1])) - - mock_service.create_volume.assert_has_calls(expected_volumes) - mock_service.create_snapshot.assert_has_calls(expected_snapshots) - scenario._attach_volume.assert_has_calls(expected_attachs) - fake_volumes.reverse() - fake_snapshots.reverse() - mock_service.delete_volume.assert_has_calls( - [mock.call(volume) for volume in fake_volumes]) - mock_service.delete_snapshot.assert_has_calls( - [mock.call(snapshot) for snapshot in fake_snapshots]) - scenario._detach_volume.assert_has_calls( - [mock.call(scenario._boot_server.return_value, - fake_volumes[i]) - for i in range(len(fake_volumes))]) - - def test_create_volume_backup(self): - mock_service = self.mock_cinder.return_value - scenario = volumes.CreateVolumeBackup(self._get_context()) - - volume_kwargs = {"some_var": "zaq"} - scenario.run(1, do_delete=True, create_volume_kwargs=volume_kwargs) - mock_service.create_volume.assert_called_once_with(1, **volume_kwargs) - mock_service.create_backup.assert_called_once_with( - mock_service.create_volume.return_value.id) - mock_service.delete_volume.assert_called_once_with( - mock_service.create_volume.return_value) - mock_service.delete_backup.assert_called_once_with( - mock_service.create_backup.return_value) - - def test_create_volume_backup_no_delete(self): - mock_service = self.mock_cinder.return_value - scenario = volumes.CreateVolumeBackup(self._get_context()) - - volume_kwargs = {"some_var": "zaq"} - scenario.run(1, do_delete=False, create_volume_kwargs=volume_kwargs) - mock_service.create_volume.assert_called_once_with(1, **volume_kwargs) - mock_service.create_backup.assert_called_once_with( - mock_service.create_volume.return_value.id) - self.assertFalse(mock_service.delete_volume.called) - self.assertFalse(mock_service.delete_backup.called) - - def test_create_and_restore_volume_backup(self): - mock_service = self.mock_cinder.return_value - volume_kwargs = {"some_var": "zaq"} - - scenario = volumes.CreateAndRestoreVolumeBackup(self._get_context()) - scenario.run(1, do_delete=True, create_volume_kwargs=volume_kwargs) - - fake_volume = mock_service.create_volume.return_value - fake_backup = mock_service.create_backup.return_value - mock_service.create_volume.assert_called_once_with(1, **volume_kwargs) - mock_service.create_backup.assert_called_once_with(fake_volume.id) - 
mock_service.restore_backup.assert_called_once_with(fake_backup.id) - mock_service.delete_volume.assert_called_once_with(fake_volume) - mock_service.delete_backup.assert_called_once_with(fake_backup) - - def test_create_and_restore_volume_backup_no_delete(self): - mock_service = self.mock_cinder.return_value - volume_kwargs = {"some_var": "zaq"} - scenario = volumes.CreateAndRestoreVolumeBackup(self._get_context()) - scenario.run(1, do_delete=False, create_volume_kwargs=volume_kwargs) - - fake_volume = mock_service.create_volume.return_value - fake_backup = mock_service.create_backup.return_value - mock_service.create_volume.assert_called_once_with(1, **volume_kwargs) - mock_service.create_backup.assert_called_once_with(fake_volume.id) - mock_service.restore_backup.assert_called_once_with(fake_backup.id) - self.assertFalse(mock_service.delete_volume.called) - self.assertFalse(mock_service.delete_backup.called) - - def test_create_and_list_volume_backups(self): - mock_service = self.mock_cinder.return_value - volume_kwargs = {"some_var": "zaq"} - scenario = volumes.CreateAndListVolumeBackups(self._get_context()) - scenario.run(1, detailed=True, do_delete=True, - create_volume_kwargs=volume_kwargs) - - fake_volume = mock_service.create_volume.return_value - fake_backup = mock_service.create_backup.return_value - mock_service.create_volume.assert_called_once_with(1, **volume_kwargs) - mock_service.create_backup.assert_called_once_with(fake_volume.id) - mock_service.list_backups.assert_called_once_with(True) - mock_service.delete_volume.assert_called_once_with(fake_volume) - mock_service.delete_backup.assert_called_once_with(fake_backup) - - def test_create_and_list_volume_backups_no_delete(self): - mock_service = self.mock_cinder.return_value - volume_kwargs = {"some_var": "zaq"} - scenario = volumes.CreateAndListVolumeBackups(self._get_context()) - scenario.run(1, detailed=True, do_delete=False, - create_volume_kwargs=volume_kwargs) - - fake_volume = mock_service.create_volume.return_value - mock_service.create_volume.assert_called_once_with(1, **volume_kwargs) - mock_service.create_backup.assert_called_once_with(fake_volume.id) - mock_service.list_backups.assert_called_once_with(True) - self.assertFalse(mock_service.delete_volume.called) - self.assertFalse(mock_service.delete_backup.called) - - @ddt.data({}, - {"nested_level": 2}, - {"image": "img"}) - @ddt.unpack - def test_create_volume_and_clone(self, nested_level=1, - image=None): - create_volumes_count = nested_level + 1 - fake_volumes = [mock.Mock(size=1) - for i in range(create_volumes_count)] - mock_service = self.mock_cinder.return_value - mock_service.create_volume.side_effect = fake_volumes - - scenario = volumes.CreateVolumeAndClone(self._get_context()) - scenario.run(1, image=image, nested_level=nested_level, - fakearg="fake") - - expected = [mock.call(1, imageRef=image, fakearg="fake") - if image else mock.call(1, fakearg="fake")] - for i in range(nested_level): - expected.append(mock.call(fake_volumes[i].size, - source_volid=fake_volumes[i].id, - fakearg="fake") - ) - self._test_atomic_action_timer(scenario.atomic_actions(), - "cinder.clone_volume", - count=nested_level) - mock_service.create_volume.assert_has_calls(expected) - - def test_create_volume_from_snapshot(self): - mock_service = self.mock_cinder.return_value - create_snapshot_args = {"force": False} - - scenario = volumes.CreateVolumeFromSnapshot(self._get_context()) - scenario.run(fakearg="f") - - fake_snapshot = mock_service.create_snapshot.return_value - 
fake_volume = mock_service.create_volume.return_value - mock_service.create_snapshot.assert_called_once_with("uuid") - mock_service.create_volume.assert_called_once_with( - 1, snapshot_id=fake_snapshot.id, fakearg="f") - mock_service.delete_snapshot.assert_called_once_with(fake_snapshot) - mock_service.delete_volume.assert_called_once_with(fake_volume) - - mock_service.create_snapshot.reset_mock() - mock_service.create_volume.reset_mock() - mock_service.delete_snapshot.reset_mock() - mock_service.delete_volume.reset_mock() - - scenario.run(do_delete=False, - create_snapshot_kwargs=create_snapshot_args, - fakearg="f") - - mock_service.create_snapshot.assert_called_once_with( - "uuid", **create_snapshot_args) - mock_service.create_volume.assert_called_once_with( - 1, snapshot_id=fake_snapshot.id, fakearg="f") - self.assertFalse(mock_service.delete_snapshot.called) - self.assertFalse(mock_service.delete_volume.called) - - @ddt.data({}, - {"image": "img"}) - @ddt.unpack - def test_create_and_accept_transfer(self, image=None): - mock_service = self.mock_cinder.return_value - scenario = volumes.CreateAndAcceptTransfer(self._get_context()) - scenario.run(1, image=image, fakearg="fake") - - expected = [mock.call(1, imageRef=image, fakearg="fake") - if image else mock.call(1, fakearg="fake")] - mock_service.create_volume.assert_has_calls(expected) - mock_service.transfer_create.assert_called_once_with( - mock_service.create_volume.return_value.id) - mock_service.transfer_accept.assert_called_once_with( - mock_service.transfer_create.return_value.id, - auth_key=mock_service.transfer_create.return_value.auth_key) diff --git a/tests/unit/plugins/openstack/scenarios/designate/__init__.py b/tests/unit/plugins/openstack/scenarios/designate/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/scenarios/designate/test_basic.py b/tests/unit/plugins/openstack/scenarios/designate/test_basic.py deleted file mode 100644 index 68b661930c..0000000000 --- a/tests/unit/plugins/openstack/scenarios/designate/test_basic.py +++ /dev/null @@ -1,284 +0,0 @@ -# Copyright 2014 Hewlett-Packard Development Company, L.P. -# -# Author: Endre Karlson -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from rally import exceptions -from rally.plugins.openstack.scenarios.designate import basic -from tests.unit import test - -BASE = "rally.plugins.openstack.scenarios.designate.basic" - - -class DesignateBasicTestCase(test.ScenarioTestCase): - - @mock.patch("%s.CreateAndListDomains._list_domains" % BASE) - @mock.patch("%s.CreateAndListDomains._create_domain" % BASE) - def test_create_and_list_domains(self, - mock__create_domain, - mock__list_domains): - mock__create_domain.return_value = "fake_domain.xyz" - mock__list_domains.return_value = (["fake_domain.org", - "fake_domain.xyz", - "ultimate_question.net"]) - basic.CreateAndListDomains(self.context).run() - mock__create_domain.assert_called_once_with() - mock__list_domains.assert_called_once_with() - - @mock.patch("%s.CreateAndListDomains._list_domains" % BASE) - @mock.patch("%s.CreateAndListDomains._create_domain" % BASE) - def test_create_and_list_domains_fails(self, - mock__create_domain, - mock__list_domains): - mock__list_domains.return_value = (["fake_domain.org", - "fake_domain.xyz", - "ultimate_question.net"]) - scenario = basic.CreateAndListDomains(self.context) - self.assertRaises(exceptions.RallyAssertionError, scenario.run) - mock__create_domain.assert_called_once_with() - - mock__create_domain.return_value = "fake_not_existed_domain.xyz" - self.assertRaises(exceptions.RallyAssertionError, scenario.run) - mock__create_domain.assert_called_with() - mock__list_domains.assert_called_with() - - @mock.patch("%s.CreateAndDeleteDomain._delete_domain" % BASE) - @mock.patch("%s.CreateAndDeleteDomain._create_domain" % BASE, - return_value={"id": "123"}) - def test_create_and_delete_domain(self, - mock__create_domain, - mock__delete_domain): - - basic.CreateAndDeleteDomain(self.context).run() - - mock__create_domain.assert_called_once_with() - mock__delete_domain.assert_called_once_with("123") - - @mock.patch("%s.CreateAndUpdateDomain._update_domain" % BASE) - @mock.patch("%s.CreateAndUpdateDomain._create_domain" % BASE) - def test_create_and_update_domain(self, - mock__create_domain, - mock__update_domain): - domain = { - "name": "zone.name", - "email": "email@zone.name", - "id": "123"} - mock__create_domain.return_value = domain - basic.CreateAndUpdateDomain(self.context).run() - mock__update_domain.assert_called_once_with(domain) - - @mock.patch("%s.ListDomains._list_domains" % BASE) - def test_list_domains(self, mock__list_domains): - basic.ListDomains(self.context).run() - mock__list_domains.assert_called_once_with() - - @mock.patch("%s.CreateAndListRecords._list_records" % BASE) - @mock.patch("%s.CreateAndListRecords._create_record" % BASE) - @mock.patch("%s.CreateAndListRecords._create_domain" % BASE) - def test_create_and_list_records(self, - mock__create_domain, - mock__create_record, - mock__list_records): - domain = { - "name": "zone.name", - "email": "email@zone.name", - "id": "123"} - mock__create_domain.return_value = domain - records_per_domain = 5 - return_value = mock.call(domain) - mock__create_record.return_value = return_value - mock__list_records.return_value = [return_value] * records_per_domain - - basic.CreateAndListRecords(self.context).run( - records_per_domain=records_per_domain) - mock__create_domain.assert_called_once_with() - - self.assertEqual(mock__create_record.mock_calls, - [return_value] - * records_per_domain) - mock__list_records.assert_called_once_with(domain["id"]) - - @mock.patch("%s.CreateAndDeleteRecords._delete_record" % BASE) - 
@mock.patch("%s.CreateAndDeleteRecords._create_record" % BASE) - @mock.patch("%s.CreateAndDeleteRecords._create_domain" % BASE) - def test_create_and_delete_records(self, - mock__create_domain, - mock__create_record, - mock__delete_record): - domain = { - "name": "zone.name", - "email": "email@zone.name", - "id": "123"} - mock__create_domain.return_value = domain - mock__create_record.return_value = {"id": "321"} - records_per_domain = 5 - - basic.CreateAndDeleteRecords(self.context).run( - records_per_domain=records_per_domain) - mock__create_domain.assert_called_once_with() - self.assertEqual(mock__create_record.mock_calls, - [mock.call(domain)] - * records_per_domain) - self.assertEqual(mock__delete_record.mock_calls, - [mock.call(domain["id"], - "321")] - * records_per_domain) - - @mock.patch("%s.ListRecords._list_records" % BASE) - def test_list_records(self, mock__list_records): - basic.ListRecords(self.context).run("123") - mock__list_records.assert_called_once_with("123") - - @mock.patch("%s.CreateAndListServers._list_servers" % BASE) - @mock.patch("%s.CreateAndListServers._create_server" % BASE) - def test_create_and_list_servers(self, - mock__create_server, - mock__list_servers): - mock__create_server.return_value = "fake_server" - mock__list_servers.return_value = ["fake_srv1", - "fake_srv2", - "fake_server"] - - # Positive case: - basic.CreateAndListServers(self.context).run() - - mock__create_server.assert_called_once_with() - mock__list_servers.assert_called_once_with() - - # Negative case: server isn't created - mock__create_server.return_value = None - self.assertRaises(exceptions.RallyAssertionError, - basic.CreateAndListServers(self.context).run) - - mock__create_server.assert_called_with() - - # Negative case: server not found in the list of existed servers - mock__create_server.return_value = "The_main_server_of_the_universe" - self.assertRaises(exceptions.RallyAssertionError, - basic.CreateAndListServers(self.context).run) - - mock__create_server.assert_called_with() - mock__list_servers.assert_called_with() - - @mock.patch("%s.CreateAndDeleteServer._delete_server" % BASE) - @mock.patch("%s.CreateAndDeleteServer._create_server" % BASE, - return_value={"id": "123"}) - def test_create_and_delete_server(self, - mock__create_server, - mock__delete_server): - basic.CreateAndDeleteServer(self.context).run() - - mock__create_server.assert_called_once_with() - mock__delete_server.assert_called_once_with("123") - - @mock.patch("%s.ListServers._list_servers" % BASE) - def test_list_servers(self, mock__list_servers): - basic.ListServers(self.context).run() - mock__list_servers.assert_called_once_with() - - # NOTE: API V2 - @mock.patch("%s.CreateAndListZones._list_zones" % BASE) - @mock.patch("%s.CreateAndListZones._create_zone" % BASE) - def test_create_and_list_zones(self, - mock__create_zone, - mock__list_zones): - mock__create_zone.return_value = "Area_51" - mock__list_zones.return_value = ["Area_51", - "Siachen", - "Bagram"] - # Positive case: - basic.CreateAndListZones(self.context).run() - mock__create_zone.assert_called_once_with() - mock__list_zones.assert_called_once_with() - - # Negative case: zone isn't created - mock__create_zone.return_value = None - self.assertRaises(exceptions.RallyAssertionError, - basic.CreateAndListZones(self.context).run) - mock__create_zone.assert_called_with() - - # Negative case: created zone not in the list of available zones - mock__create_zone.return_value = "HAARP" - self.assertRaises(exceptions.RallyAssertionError, - 
basic.CreateAndListZones(self.context).run) - mock__create_zone.assert_called_with() - mock__list_zones.assert_called_with() - - @mock.patch("%s.CreateAndDeleteZone._delete_zone" % BASE) - @mock.patch("%s.CreateAndDeleteZone._create_zone" % BASE, - return_value={"id": "123"}) - def test_create_and_delete_zone(self, - mock__create_zone, - mock__delete_zone): - basic.CreateAndDeleteZone(self.context).run() - - mock__create_zone.assert_called_once_with() - mock__delete_zone.assert_called_once_with("123") - - @mock.patch("%s.ListZones._list_zones" % BASE) - def test_list_zones(self, mock_list_zones__list_zones): - basic.ListZones(self.context).run() - mock_list_zones__list_zones.assert_called_once_with() - - @mock.patch("%s.ListRecordsets._list_recordsets" % BASE) - def test_list_recordsets(self, mock__list_recordsets): - basic.ListRecordsets(self.context).run("123") - mock__list_recordsets.assert_called_once_with("123") - - @mock.patch("%s.CreateAndDeleteRecordsets._delete_recordset" % BASE) - @mock.patch("%s.CreateAndDeleteRecordsets._create_recordset" % BASE, - return_value={"id": "321"}) - def test_create_and_delete_recordsets(self, - mock__create_recordset, - mock__delete_recordset): - zone = {"id": "1234"} - self.context.update({ - "tenant": { - "zones": [zone] - } - }) - - recordsets_per_zone = 5 - - basic.CreateAndDeleteRecordsets(self.context).run( - recordsets_per_zone=recordsets_per_zone) - self.assertEqual(mock__create_recordset.mock_calls, - [mock.call(zone)] - * recordsets_per_zone) - self.assertEqual(mock__delete_recordset.mock_calls, - [mock.call(zone["id"], - "321")] - * recordsets_per_zone) - - @mock.patch("%s.CreateAndListRecordsets._list_recordsets" % BASE) - @mock.patch("%s.CreateAndListRecordsets._create_recordset" % BASE) - def test_create_and_list_recordsets(self, - mock__create_recordset, - mock__list_recordsets): - zone = {"id": "1234"} - self.context.update({ - "tenant": { - "zones": [zone] - } - }) - recordsets_per_zone = 5 - - basic.CreateAndListRecordsets(self.context).run( - recordsets_per_zone=recordsets_per_zone) - self.assertEqual(mock__create_recordset.mock_calls, - [mock.call(zone)] - * recordsets_per_zone) - mock__list_recordsets.assert_called_once_with(zone["id"]) diff --git a/tests/unit/plugins/openstack/scenarios/designate/test_utils.py b/tests/unit/plugins/openstack/scenarios/designate/test_utils.py deleted file mode 100644 index bb5f6386b1..0000000000 --- a/tests/unit/plugins/openstack/scenarios/designate/test_utils.py +++ /dev/null @@ -1,287 +0,0 @@ -# Copyright 2014 Hewlett-Packard Development Company, L.P. -# -# Author: Endre Karlson -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import mock - -from rally.plugins.openstack.scenarios.designate import utils -from tests.unit import test - -DESIGNATE_UTILS = "rally.plugins.openstack.scenarios.designate.utils." 
- - -@ddt.ddt -class DesignateScenarioTestCase(test.ScenarioTestCase): - - def setUp(self): - super(DesignateScenarioTestCase, self).setUp() - self.domain = mock.Mock() - self.zone = mock.Mock() - self.server = mock.Mock() - - self.client = self.clients("designate", version="2") - - @ddt.data( - {}, - {"email": "root@zone.name"}) - def test_create_domain(self, domain_data): - random_name = "foo" - scenario = utils.DesignateScenario(context=self.context) - scenario.generate_random_name = mock.Mock(return_value=random_name) - self.clients("designate").domains.create.return_value = self.domain - expected = {"email": "root@random.name"} - expected.update(domain_data) - expected["name"] = "%s.name." % random_name - - domain = scenario._create_domain(domain_data) - self.clients("designate").domains.create.assert_called_once_with( - expected) - self.assertEqual(self.domain, domain) - self._test_atomic_action_timer(scenario.atomic_actions(), - "designate.create_domain") - - def test_list_domains(self): - scenario = utils.DesignateScenario(context=self.context) - return_domains_list = scenario._list_domains() - self.assertEqual(self.clients("designate").domains.list.return_value, - return_domains_list) - self._test_atomic_action_timer(scenario.atomic_actions(), - "designate.list_domains") - - def test_delete_domain(self): - scenario = utils.DesignateScenario(context=self.context) - - domain = scenario._create_domain() - scenario._delete_domain(domain["id"]) - self._test_atomic_action_timer(scenario.atomic_actions(), - "designate.delete_domain") - - def test_update_domain(self): - scenario = utils.DesignateScenario(context=self.context) - domain = scenario._create_domain() - self.clients("designate").domains.update.return_value = self.domain - updated_domain = scenario._update_domain(domain) - self.clients("designate").domains.update.assert_called_once_with( - domain) - self.assertEqual(self.domain, updated_domain) - self._test_atomic_action_timer(scenario.atomic_actions(), - "designate.update_domain") - - @ddt.data( - {}, - {"data": "127.0.0.1"}) - def test_create_record(self, record_data): - random_name = "foo" - domain_name = "zone.name." 
- domain = {"name": domain_name, "id": "123"} - record_name = "%s.%s" % (random_name, domain_name) - - scenario = utils.DesignateScenario(context=self.context) - scenario.generate_random_name = mock.Mock(return_value=random_name) - - expected = {"type": "A", "data": "10.0.0.1"} - expected.update(record_data) - expected["name"] = record_name - - scenario._create_record(domain, record=record_data) - self.clients("designate").records.create.assert_called_once_with( - domain["id"], expected) - self._test_atomic_action_timer(scenario.atomic_actions(), - "designate.create_record") - - def test_list_records(self): - scenario = utils.DesignateScenario(context=self.context) - return_records_list = scenario._list_records("123") - self.assertEqual(self.clients("designate").records.list.return_value, - return_records_list) - self._test_atomic_action_timer(scenario.atomic_actions(), - "designate.list_records") - - def test_delete_record(self): - scenario = utils.DesignateScenario(context=self.context) - - domain_id = mock.Mock() - record_id = mock.Mock() - scenario._delete_record(domain_id, record_id) - self.clients("designate").records.delete.assert_called_once_with( - domain_id, record_id) - self._test_atomic_action_timer(scenario.atomic_actions(), - "designate.delete_record") - - self.clients("designate").records.delete.reset_mock() - scenario._delete_record(domain_id, record_id) - self.clients("designate").records.delete.assert_called_once_with( - domain_id, record_id) - - def test_create_server(self): - scenario = utils.DesignateScenario(context=self.context) - random_name = "foo" - scenario.generate_random_name = mock.Mock(return_value=random_name) - - explicit_name = "bar.io." - - self.admin_clients( - "designate").servers.create.return_value = self.server - - # Check that the defaults / randoms are used if nothing is specified - server = scenario._create_server() - self.admin_clients("designate").servers.create.assert_called_once_with( - {"name": "%s.name." % random_name}) - self.assertEqual(self.server, server) - self._test_atomic_action_timer(scenario.atomic_actions(), - "designate.create_server") - - self.admin_clients("designate").servers.create.reset_mock() - - # Check that when specifying server name defaults are not used... - data = {"name": explicit_name} - server = scenario._create_server(data) - self.admin_clients( - "designate").servers.create.assert_called_once_with(data) - self.assertEqual(self.server, server) - - def test_delete_server(self): - scenario = utils.DesignateScenario(context=self.context) - - scenario._delete_server("foo_id") - self.admin_clients("designate").servers.delete.assert_called_once_with( - "foo_id") - self._test_atomic_action_timer(scenario.atomic_actions(), - "designate.delete_server") - - # NOTE: API V2 - @ddt.data( - {}, - {"email": "root@zone.name"}, - {"name": "example.name."}, - { - "email": "root@zone.name", - "name": "example.name." - }) - def test_create_zone(self, zone_data): - scenario = utils.DesignateScenario() - - random_name = "foo" - - scenario = utils.DesignateScenario(context=self.context) - scenario.generate_random_name = mock.Mock(return_value=random_name) - self.client.zones.create.return_value = self.zone - - expected = { - "email": "root@random.name", - "name": "%s.name." 
% random_name, - "type_": "PRIMARY" - } - expected.update(zone_data) - - # Check that the defaults / randoms are used if nothing is specified - zone = scenario._create_zone(**zone_data) - self.client.zones.create.assert_called_once_with( - description=None, - ttl=None, - **expected) - self.assertEqual(self.zone, zone) - self._test_atomic_action_timer(scenario.atomic_actions(), - "designate.create_zone") - - def test_list_zones(self): - scenario = utils.DesignateScenario(context=self.context) - return_zones_list = scenario._list_zones() - self.assertEqual(self.client.zones.list.return_value, - return_zones_list) - self._test_atomic_action_timer(scenario.atomic_actions(), - "designate.list_zones") - - def test_delete_zone(self): - scenario = utils.DesignateScenario(context=self.context) - - zone = scenario._create_zone() - scenario._delete_zone(zone["id"]) - self._test_atomic_action_timer(scenario.atomic_actions(), - "designate.delete_zone") - - def test_list_recordsets(self): - scenario = utils.DesignateScenario(context=self.context) - return_recordsets_list = scenario._list_recordsets("123") - self.assertEqual( - self.client.recordsets.list.return_value, - return_recordsets_list) - self._test_atomic_action_timer(scenario.atomic_actions(), - "designate.list_recordsets") - - @ddt.data( - {}, - {"data": "127.0.0.1"}) - def test_create_recordset(self, recordset_data): - scenario = utils.DesignateScenario() - - random_name = "foo" - zone_name = "zone.name." - random_recordset_name = "%s.%s" % (random_name, zone_name) - - scenario = utils.DesignateScenario(context=self.context) - scenario.generate_random_name = mock.Mock(return_value=random_name) - - zone = {"name": zone_name, "id": "123"} - - # Create with randoms (name and type) - scenario._create_recordset(zone) - - self.client.recordsets.create.assert_called_once_with( - zone["id"], - name=random_recordset_name, - type_="A", - records=["10.0.0.1"]) - - self._test_atomic_action_timer(scenario.atomic_actions(), - "designate.create_recordset") - - self.client.recordsets.create.reset_mock() - - # Specify name - recordset = {"name": "www.zone.name.", "type_": "ASD"} - scenario._create_recordset(zone, recordset) - self.client.recordsets.create.assert_called_once_with( - zone["id"], - name="www.zone.name.", - type_="ASD", - records=["10.0.0.1"]) - - self.client.recordsets.create.reset_mock() - - # Specify type without underscore - scenario._create_recordset(zone, {"type": "A"}) - self.client.recordsets.create.assert_called_once_with( - zone["id"], - name="foo.zone.name.", - type_="A", - records=["10.0.0.1"]) - - def test_delete_recordset(self): - scenario = utils.DesignateScenario(context=self.context) - - zone_id = mock.Mock() - recordset_id = mock.Mock() - scenario._delete_recordset(zone_id, recordset_id) - self.client.recordsets.delete.assert_called_once_with( - zone_id, recordset_id) - self._test_atomic_action_timer(scenario.atomic_actions(), - "designate.delete_recordset") - - self.client.recordsets.delete.reset_mock() - scenario._delete_recordset(zone_id, recordset_id) - self.client.recordsets.delete.assert_called_once_with( - zone_id, recordset_id) diff --git a/tests/unit/plugins/openstack/scenarios/ec2/__init__.py b/tests/unit/plugins/openstack/scenarios/ec2/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/scenarios/ec2/test_servers.py b/tests/unit/plugins/openstack/scenarios/ec2/test_servers.py deleted file mode 100644 index c80f0543be..0000000000 --- 
a/tests/unit/plugins/openstack/scenarios/ec2/test_servers.py +++ /dev/null @@ -1,34 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.plugins.openstack.scenarios.ec2 import servers -from tests.unit import test - - -class EC2ServersTestCase(test.ScenarioTestCase): - - def test_list_servers(self): - scenario = servers.ListServers(self.context) - scenario._list_servers = mock.MagicMock() - scenario.run() - scenario._list_servers.assert_called_once_with() - - def test_boot_server(self): - scenario = servers.BootServer(self.context) - scenario._boot_servers = mock.Mock() - scenario.run("foo_image", "foo_flavor", foo="bar") - scenario._boot_servers.assert_called_once_with( - "foo_image", "foo_flavor", foo="bar") diff --git a/tests/unit/plugins/openstack/scenarios/ec2/test_utils.py b/tests/unit/plugins/openstack/scenarios/ec2/test_utils.py deleted file mode 100644 index 69a241bff8..0000000000 --- a/tests/unit/plugins/openstack/scenarios/ec2/test_utils.py +++ /dev/null @@ -1,71 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from rally.common import cfg -from rally.plugins.openstack.scenarios.ec2 import utils -from tests.unit import test - -CONF = cfg.CONF - - -class EC2ScenarioTestCase(test.ScenarioTestCase): - - def setUp(self): - super(EC2ScenarioTestCase, self).setUp() - self.server1 = mock.MagicMock() - self.server2 = mock.MagicMock() - self.reservations = mock.MagicMock(instances=[self.server1, - self.server2]) - - def test__list_servers(self): - servers_list = [] - self.clients("ec2").get_only_instances.return_value = servers_list - ec2_scenario = utils.EC2Scenario() - return_servers_list = ec2_scenario._list_servers() - self.assertEqual(servers_list, return_servers_list) - self._test_atomic_action_timer(ec2_scenario.atomic_actions(), - "ec2.list_servers") - - def test__update_resource(self): - resource = mock.MagicMock() - scenario = utils.EC2Scenario(self.context) - self.assertEqual(scenario._update_resource(resource), resource) - resource.update.assert_called_once_with() - - def test__boot_servers(self): - self.clients("ec2").run_instances.return_value = self.reservations - ec2_scenario = utils.EC2Scenario(context={}) - ec2_scenario._update_resource = mock.Mock() - ec2_scenario._boot_servers("image", "flavor", 2) - expected = [ - mock.call( - self.server1, - ready_statuses=["RUNNING"], - update_resource=ec2_scenario._update_resource, - check_interval=CONF.openstack.ec2_server_boot_poll_interval, - timeout=CONF.openstack.ec2_server_boot_timeout - ), - mock.call( - self.server2, - ready_statuses=["RUNNING"], - update_resource=ec2_scenario._update_resource, - check_interval=CONF.openstack.ec2_server_boot_poll_interval, - timeout=CONF.openstack.ec2_server_boot_timeout - ) - ] - self.mock_wait_for_status.mock.assert_has_calls(expected) - self._test_atomic_action_timer(ec2_scenario.atomic_actions(), - "ec2.boot_servers") diff --git a/tests/unit/plugins/openstack/scenarios/glance/__init__.py b/tests/unit/plugins/openstack/scenarios/glance/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/scenarios/glance/test_images.py b/tests/unit/plugins/openstack/scenarios/glance/test_images.py deleted file mode 100644 index 9b90c3a894..0000000000 --- a/tests/unit/plugins/openstack/scenarios/glance/test_images.py +++ /dev/null @@ -1,242 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally import exceptions -from rally.plugins.openstack.scenarios.glance import images -from tests.unit import fakes -from tests.unit import test - -BASE = "rally.plugins.openstack.scenarios.glance.images" -GLANCE_V2_PATH = ("rally.plugins.openstack.services.image.glance_v2." 
- "GlanceV2Service") - - -class GlanceBasicTestCase(test.ScenarioTestCase): - - def get_test_context(self): - context = super(GlanceBasicTestCase, self).get_test_context() - context.update({ - "admin": { - "id": "fake_user_id", - "credential": mock.MagicMock() - }, - "user": { - "id": "fake_user_id", - "credential": mock.MagicMock() - }, - "tenant": {"id": "fake_tenant_id", - "name": "fake_tenant_name"} - }) - return context - - def setUp(self): - super(GlanceBasicTestCase, self).setUp() - patch = mock.patch( - "rally.plugins.openstack.services.image.image.Image") - self.addCleanup(patch.stop) - self.mock_image = patch.start() - - def test_create_and_list_image(self): - image_service = self.mock_image.return_value - fake_image = mock.Mock(id=1, name="img_2") - image_service.create_image.return_value = fake_image - image_service.list_images.return_value = [ - mock.Mock(id=0, name="img_1"), - fake_image, - mock.Mock(id=2, name="img_3")] - properties = {"fakeprop": "fake"} - call_args = {"container_format": "cf", - "image_location": "url", - "disk_format": "df", - "visibility": "vs", - "min_disk": 0, - "min_ram": 0, - "properties": properties} - # Positive case - images.CreateAndListImage(self.context).run( - "cf", "url", "df", "vs", 0, 0, properties) - image_service.create_image.assert_called_once_with(**call_args) - - # Negative case: image isn't created - image_service.create_image.return_value = None - self.assertRaises(exceptions.RallyAssertionError, - images.CreateAndListImage(self.context).run, - "cf", "url", "df", "vs", 0, 0, properties) - image_service.create_image.assert_called_with(**call_args) - - # Negative case: created image n ot in the list of available images - image_service.create_image.return_value = mock.Mock( - id=12, name="img_nameN") - self.assertRaises(exceptions.RallyAssertionError, - images.CreateAndListImage(self.context).run, - "cf", "url", "df", "vs", 0, 0, properties) - image_service.create_image.assert_called_with(**call_args) - image_service.list_images.assert_called_with() - - def test_list_images(self): - image_service = self.mock_image.return_value - - images.ListImages(self.context).run() - image_service.list_images.assert_called_once_with() - - def test_create_and_delete_image(self): - image_service = self.mock_image.return_value - - fake_image = fakes.FakeImage(id=1, name="imagexxx") - image_service.create_image.return_value = fake_image - properties = {"fakeprop": "fake"} - call_args = {"container_format": "cf", - "image_location": "url", - "disk_format": "df", - "visibility": "vs", - "min_disk": 0, - "min_ram": 0, - "properties": properties} - - images.CreateAndDeleteImage(self.context).run( - "cf", "url", "df", "vs", 0, 0, properties) - - image_service.create_image.assert_called_once_with(**call_args) - image_service.delete_image.assert_called_once_with(fake_image.id) - - def test_create_and_get_image(self): - image_service = self.mock_image.return_value - - fake_image = fakes.FakeImage(id=1, name="img_name1") - image_service.create_image.return_value = fake_image - fake_image_info = fakes.FakeImage(id=1, name="img_name1", - status="active") - image_service.get_image.return_value = fake_image_info - properties = {"fakeprop": "fake"} - call_args = {"container_format": "cf", - "image_location": "url", - "disk_format": "df", - "visibility": "vs", - "min_disk": 0, - "min_ram": 0, - "properties": properties} - - # Positive case - images.CreateAndGetImage(self.context).run( - "cf", "url", "df", "vs", 0, 0, properties) - 
image_service.create_image.assert_called_once_with(**call_args) - image_service.get_image.assert_called_once_with(fake_image) - - # Negative case: image isn't created - image_service.create_image.reset_mock() - image_service.create_image.return_value = None - self.assertRaises(exceptions.RallyAssertionError, - images.CreateAndGetImage(self.context).run, - "cf", "url", "df", "vs", 0, 0, properties) - image_service.create_image.assert_called_with(**call_args) - - # Negative case: image obtained in _get_image not the created image - image_service.create_image.reset_mock() - image_service.get_image.reset_mock() - image_service.create_image.return_value = fakes.FakeImage( - id=12, name="img_nameN") - self.assertRaises(exceptions.RallyAssertionError, - images.CreateAndGetImage(self.context).run, - "cf", "url", "df", "vs", 0, 0, properties) - image_service.create_image.assert_called_with(**call_args) - image_service.get_image.assert_called_with( - image_service.create_image.return_value) - - def test_create_and_download_image(self): - image_service = self.mock_image.return_value - - fake_image = fakes.FakeImage() - image_service.create_image.return_value = fake_image - properties = {"fakeprop": "fake"} - call_args = {"container_format": "cf", - "image_location": "url", - "disk_format": "df", - "visibility": "vs", - "min_disk": 0, - "min_ram": 0, - "properties": properties} - - images.CreateAndDownloadImage(self.context).run( - "cf", "url", "df", "vs", 0, 0, properties=properties) - - image_service.create_image.assert_called_once_with(**call_args) - image_service.download_image.assert_called_once_with(fake_image.id) - - @mock.patch("%s.CreateImageAndBootInstances._boot_servers" % BASE) - def test_create_image_and_boot_instances(self, mock_boot_servers): - image_service = self.mock_image.return_value - - fake_image = fakes.FakeImage() - fake_servers = [mock.Mock() for i in range(5)] - image_service.create_image.return_value = fake_image - mock_boot_servers.return_value = fake_servers - boot_server_kwargs = {"fakeserverarg": "f"} - properties = {"fakeprop": "fake"} - call_args = {"container_format": "cf", - "image_location": "url", - "disk_format": "df", - "visibility": "vs", - "min_disk": 0, - "min_ram": 0, - "properties": properties} - - images.CreateImageAndBootInstances(self.context).run( - "cf", "url", "df", "fid", 5, visibility="vs", min_disk=0, - min_ram=0, properties=properties, - boot_server_kwargs=boot_server_kwargs) - image_service.create_image.assert_called_once_with(**call_args) - mock_boot_servers.assert_called_once_with("image-id-0", "fid", - 5, **boot_server_kwargs) - - def test_create_and_update_image(self): - image_service = self.mock_image.return_value - - fake_image = fakes.FakeImage(id=1, name="imagexxx") - image_service.create_image.return_value = fake_image - properties = {"fakeprop": "fake"} - create_args = {"container_format": "cf", - "image_location": "url", - "disk_format": "df", - "visibility": "vs", - "min_disk": 0, - "min_ram": 0, - "properties": properties} - - images.CreateAndUpdateImage(self.context).run( - "cf", "url", "df", None, "vs", 0, 0, properties, 0, 0) - - image_service.create_image.assert_called_once_with(**create_args) - image_service.update_image.assert_called_once_with( - fake_image.id, min_disk=0, min_ram=0, remove_props=None) - - @mock.patch("%s.create_image" % GLANCE_V2_PATH) - @mock.patch("%s.deactivate_image" % GLANCE_V2_PATH) - def test_create_and_deactivate_image(self, mock_deactivate_image, - mock_create_image): - fake_image = 
fakes.FakeImage(id=1, name="img_name1") - mock_create_image.return_value = fake_image - call_args = {"container_format": "cf", - "image_location": "url", - "disk_format": "df", - "visibility": "vs", - "min_disk": 0, - "min_ram": 0} - - images.CreateAndDeactivateImage(self.context).run( - "cf", "url", "df", "vs", 0, 0) - mock_create_image.assert_called_once_with(**call_args) - mock_deactivate_image.assert_called_once_with(fake_image.id) diff --git a/tests/unit/plugins/openstack/scenarios/glance/test_utils.py b/tests/unit/plugins/openstack/scenarios/glance/test_utils.py deleted file mode 100644 index 8824386e57..0000000000 --- a/tests/unit/plugins/openstack/scenarios/glance/test_utils.py +++ /dev/null @@ -1,90 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import tempfile - -import ddt -import mock - -from rally.plugins.openstack.scenarios.glance import utils -from tests.unit import test - -GLANCE_UTILS = "rally.plugins.openstack.scenarios.glance.utils" - - -@ddt.ddt -class GlanceScenarioTestCase(test.ScenarioTestCase): - - def setUp(self): - super(GlanceScenarioTestCase, self).setUp() - self.image = mock.Mock() - self.image1 = mock.Mock() - self.scenario_clients = mock.Mock() - self.scenario_clients.glance.choose_version.return_value = 1 - - def test_list_images(self): - scenario = utils.GlanceScenario(context=self.context) - return_images_list = scenario._list_images() - self.clients("glance").images.list.assert_called_once_with() - self.assertEqual(list(self.clients("glance").images.list.return_value), - return_images_list) - self._test_atomic_action_timer(scenario.atomic_actions(), - "glance.list_images") - - @ddt.data({}, - {"name": "foo"}, - {"name": None}, - {"name": ""}, - {"name": "bar", "fakearg": "fakearg"}, - {"fakearg": "fakearg"}) - @mock.patch("rally.plugins.openstack.wrappers.glance.wrap") - def test_create_image(self, create_args, mock_wrap): - image_location = tempfile.NamedTemporaryFile() - mock_wrap.return_value.create_image.return_value = self.image - scenario = utils.GlanceScenario(context=self.context, - clients=self.scenario_clients) - scenario.generate_random_name = mock.Mock() - - return_image = scenario._create_image("container_format", - image_location.name, - "disk_format", - **create_args) - - expected_args = dict(create_args) - if not expected_args.get("name"): - expected_args["name"] = scenario.generate_random_name.return_value - - self.assertEqual(self.image, return_image) - mock_wrap.assert_called_once_with(scenario._clients.glance, scenario) - mock_wrap.return_value.create_image.assert_called_once_with( - "container_format", image_location.name, "disk_format", - **expected_args) - self._test_atomic_action_timer(scenario.atomic_actions(), - "glance.create_image") - - @mock.patch("rally.plugins.openstack.wrappers.glance.wrap") - def test_delete_image(self, mock_wrap): - deleted_image = mock.Mock(status="DELETED") - wrapper = mock_wrap.return_value - wrapper.get_image.side_effect = [self.image, deleted_image] - - scenario = 
utils.GlanceScenario(context=self.context, - clients=self.scenario_clients) - scenario._delete_image(self.image) - self.clients("glance").images.delete.assert_called_once_with( - self.image.id) - - mock_wrap.assert_called_once_with(scenario._clients.glance, scenario) - self._test_atomic_action_timer(scenario.atomic_actions(), - "glance.delete_image") diff --git a/tests/unit/plugins/openstack/scenarios/gnocchi/__init__.py b/tests/unit/plugins/openstack/scenarios/gnocchi/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/scenarios/gnocchi/test_archive_policy_rule.py b/tests/unit/plugins/openstack/scenarios/gnocchi/test_archive_policy_rule.py deleted file mode 100644 index 1189cce229..0000000000 --- a/tests/unit/plugins/openstack/scenarios/gnocchi/test_archive_policy_rule.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright 2017 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.plugins.openstack.scenarios.gnocchi import archive_policy_rule -from tests.unit import test - - -class GnocchiArchivePolicyRuleTestCase(test.ScenarioTestCase): - - def get_test_context(self): - context = super(GnocchiArchivePolicyRuleTestCase, - self).get_test_context() - context.update({ - "admin": { - "user_id": "fake", - "credential": mock.MagicMock() - }, - "user": { - "user_id": "fake", - "credential": mock.MagicMock() - }, - "tenant": {"id": "fake"} - }) - return context - - def setUp(self): - super(GnocchiArchivePolicyRuleTestCase, self).setUp() - patch = mock.patch( - "rally.plugins.openstack.services.gnocchi.metric.GnocchiService") - self.addCleanup(patch.stop) - self.mock_metric = patch.start() - - def test_list_archive_policy_rule(self): - metric_service = self.mock_metric.return_value - scenario = archive_policy_rule.ListArchivePolicyRule(self.context) - scenario.run() - metric_service.list_archive_policy_rule.assert_called_once_with() - - def test_create_archive_policy_rule(self): - metric_service = self.mock_metric.return_value - scenario = archive_policy_rule.CreateArchivePolicyRule(self.context) - scenario.generate_random_name = mock.MagicMock(return_value="name") - scenario.run(metric_pattern="foo_pat*", archive_policy_name="foo_pol") - metric_service.create_archive_policy_rule.assert_called_once_with( - "name", metric_pattern="foo_pat*", archive_policy_name="foo_pol") - - def test_create_delete_archive_policy_rule(self): - metric_service = self.mock_metric.return_value - scenario = archive_policy_rule.CreateDeleteArchivePolicyRule( - self.context) - scenario.generate_random_name = mock.MagicMock(return_value="name") - scenario.run(metric_pattern="foo_pat*", archive_policy_name="foo_pol") - metric_service.create_archive_policy_rule.assert_called_once_with( - "name", metric_pattern="foo_pat*", archive_policy_name="foo_pol") - metric_service.delete_archive_policy_rule.assert_called_once_with( - "name") diff --git a/tests/unit/plugins/openstack/scenarios/gnocchi/test_capabilities.py 
b/tests/unit/plugins/openstack/scenarios/gnocchi/test_capabilities.py deleted file mode 100644 index bd25162bbd..0000000000 --- a/tests/unit/plugins/openstack/scenarios/gnocchi/test_capabilities.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2017 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.plugins.openstack.scenarios.gnocchi import capabilities -from tests.unit import test - - -class GnocchiCapabilitiesTestCase(test.ScenarioTestCase): - - def get_test_context(self): - context = super(GnocchiCapabilitiesTestCase, self).get_test_context() - context.update({ - "user": { - "user_id": "fake", - "credential": mock.MagicMock() - }, - "tenant": {"id": "fake"} - }) - return context - - def setUp(self): - super(GnocchiCapabilitiesTestCase, self).setUp() - patch = mock.patch( - "rally.plugins.openstack.services.gnocchi.metric.GnocchiService") - self.addCleanup(patch.stop) - self.mock_metric = patch.start() - - def test__list_capabilities(self): - metric_service = self.mock_metric.return_value - capabilities.ListCapabilities(self.context).run() - metric_service.list_capabilities.assert_called_once_with() diff --git a/tests/unit/plugins/openstack/scenarios/gnocchi/test_status.py b/tests/unit/plugins/openstack/scenarios/gnocchi/test_status.py deleted file mode 100644 index 11bc330553..0000000000 --- a/tests/unit/plugins/openstack/scenarios/gnocchi/test_status.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright 2017 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
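The Gnocchi scenario tests in this part of the diff all follow one shape: patch the service class the scenario delegates to, run the scenario, and assert that the expected service call was made with the expected arguments. A minimal, self-contained sketch of that shape, using only the standard library and placeholder names (GetStatusScenario and its service object are illustrative, not Rally classes):

    import unittest
    from unittest import mock


    class GetStatusScenario(object):
        """Toy scenario: all real work is delegated to a service object."""

        def __init__(self, service):
            self._service = service

        def run(self, detailed=False):
            return self._service.get_status(detailed)


    class GetStatusScenarioTestCase(unittest.TestCase):
        def test_run_delegates_to_service(self):
            service = mock.Mock()
            GetStatusScenario(service).run(False)
            # The scenario has no logic of its own, so the whole test is a
            # single assertion on how the service mock was called.
            service.get_status.assert_called_once_with(False)


    if __name__ == "__main__":
        unittest.main()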
- -import mock - -from rally.plugins.openstack.scenarios.gnocchi import status -from tests.unit import test - - -class GnocchiStatusTestCase(test.ScenarioTestCase): - - def get_test_context(self): - context = super(GnocchiStatusTestCase, self).get_test_context() - context.update({ - "admin": { - "user_id": "fake", - "credential": mock.MagicMock() - } - }) - return context - - def setUp(self): - super(GnocchiStatusTestCase, self).setUp() - patch = mock.patch( - "rally.plugins.openstack.services.gnocchi.metric.GnocchiService") - self.addCleanup(patch.stop) - self.mock_metric = patch.start() - - def test_get_status(self): - metric_service = self.mock_metric.return_value - status.GetStatus(self.context).run(False) - metric_service.get_status.assert_called_once_with(False) diff --git a/tests/unit/plugins/openstack/scenarios/gnocchi/test_utils.py b/tests/unit/plugins/openstack/scenarios/gnocchi/test_utils.py deleted file mode 100644 index 901935c402..0000000000 --- a/tests/unit/plugins/openstack/scenarios/gnocchi/test_utils.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2017 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.plugins.openstack.scenarios.gnocchi import utils -from tests.unit import test - - -class GnocchiBaseTestCase(test.ScenarioTestCase): - - def setUp(self): - super(GnocchiBaseTestCase, self).setUp() - self.context = super(GnocchiBaseTestCase, self).get_test_context() - self.context.update({ - "admin": { - "id": "fake_user_id", - "credential": mock.MagicMock() - }, - "user": { - "id": "fake_user_id", - "credential": mock.MagicMock() - }, - "tenant": {"id": "fake_tenant_id", - "name": "fake_tenant_name"} - }) - patch = mock.patch( - "rally.plugins.openstack.services.gnocchi.metric.GnocchiService") - self.addCleanup(patch.stop) - self.mock_service = patch.start() - - def test__gnocchi_base(self): - base = utils.GnocchiBase(self.context) - self.assertEqual(base.admin_gnocchi, - self.mock_service.return_value) - self.assertEqual(base.gnocchi, - self.mock_service.return_value) diff --git a/tests/unit/plugins/openstack/scenarios/heat/__init__.py b/tests/unit/plugins/openstack/scenarios/heat/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/scenarios/heat/test_stacks.py b/tests/unit/plugins/openstack/scenarios/heat/test_stacks.py deleted file mode 100644 index b877599bb6..0000000000 --- a/tests/unit/plugins/openstack/scenarios/heat/test_stacks.py +++ /dev/null @@ -1,327 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally import exceptions -from rally.plugins.openstack.scenarios.heat import stacks -from tests.unit import test - -BASE = "rally.plugins.openstack.scenarios.heat.stacks" - - -class HeatStacksTestCase(test.ScenarioTestCase): - - def setUp(self): - super(HeatStacksTestCase, self).setUp() - self.default_template = "heat_template_version: 2013-05-23" - self.default_parameters = {"dummy_param": "dummy_key"} - self.default_files = ["dummy_file.yaml"] - self.default_environment = {"env": "dummy_env"} - self.default_output_key = "dummy_output_key" - - @mock.patch("%s.CreateAndListStack._list_stacks" % BASE) - @mock.patch("%s.CreateAndListStack._create_stack" % BASE) - def test_create_and_list_stack(self, - mock__create_stack, - mock__list_stacks): - stack = mock.Mock() - mock__create_stack.return_value = stack - mock__list_stacks.return_value = [stack] * 3 - - # Positive case: - stacks.CreateAndListStack(self.context).run( - template_path=self.default_template, - parameters=self.default_parameters, - files=self.default_files, - environment=self.default_environment) - - mock__create_stack.assert_called_once_with( - self.default_template, self.default_parameters, - self.default_files, self.default_environment) - mock__list_stacks.assert_called_once_with() - - # Negative case1: stack isn't created - mock__create_stack.return_value = None - self.assertRaises(exceptions.RallyAssertionError, - stacks.CreateAndListStack(self.context).run, - template_path=self.default_template, - parameters=self.default_parameters, - files=self.default_files, - environment=self.default_environment) - - mock__create_stack.assert_called_with( - self.default_template, self.default_parameters, - self.default_files, self.default_environment) - - # Negative case2: created stack not in the list of available stacks - fake_stack = mock.Mock() - mock__create_stack.return_value = fake_stack - self.assertRaises(exceptions.RallyAssertionError, - stacks.CreateAndListStack(self.context).run, - template_path=self.default_template, - parameters=self.default_parameters, - files=self.default_files, - environment=self.default_environment) - - mock__create_stack.assert_called_with( - self.default_template, self.default_parameters, - self.default_files, self.default_environment) - mock__list_stacks.assert_called_with() - - @mock.patch("%s.ListStacksAndResources._list_stacks" % BASE) - def test_list_stack_and_resources(self, mock__list_stacks): - stack = mock.Mock() - heat_scenario = stacks.ListStacksAndResources(self.context) - mock__list_stacks.return_value = [stack] - heat_scenario.run() - self.clients("heat").resources.list.assert_called_once_with( - stack.id) - self._test_atomic_action_timer(heat_scenario.atomic_actions(), - "heat.list_resources") - - @mock.patch("%s.ListStacksAndEvents._list_stacks" % BASE) - def test_list_stack_and_events(self, mock__list_stacks): - stack = mock.Mock() - mock__list_stacks.return_value = [stack] - heat_scenario = stacks.ListStacksAndEvents(self.context) - heat_scenario.run() - self.clients("heat").events.list.assert_called_once_with(stack.id) - self._test_atomic_action_timer( - heat_scenario.atomic_actions(), "heat.list_events") - - @mock.patch("%s.CreateAndDeleteStack._delete_stack" % BASE) - @mock.patch("%s.CreateAndDeleteStack._create_stack" % BASE) - @mock.patch("%s.CreateAndDeleteStack.generate_random_name" % BASE, - return_value="test-rally-stack") - def 
test_create_and_delete_stack(self, - mock_generate_random_name, - mock__create_stack, - mock__delete_stack): - fake_stack = object() - mock__create_stack.return_value = fake_stack - stacks.CreateAndDeleteStack(self.context).run( - template_path=self.default_template, - parameters=self.default_parameters, - files=self.default_files, - environment=self.default_environment) - - mock__create_stack.assert_called_once_with( - self.default_template, - self.default_parameters, - self.default_files, - self.default_environment) - mock__delete_stack.assert_called_once_with(fake_stack) - - @mock.patch("%s.CreateCheckDeleteStack._delete_stack" % BASE) - @mock.patch("%s.CreateCheckDeleteStack._check_stack" % BASE) - @mock.patch("%s.CreateCheckDeleteStack._create_stack" % BASE) - def test_create_check_delete_stack(self, - mock__create_stack, - mock__check_stack, - mock__delete_stack): - stacks.CreateCheckDeleteStack(self.context).run( - template_path=self.default_template, - parameters=self.default_parameters, - files=self.default_files, - environment=self.default_environment) - - mock__create_stack.assert_called_once_with( - self.default_template, self.default_parameters, - self.default_files, self.default_environment) - mock__check_stack.assert_called_once_with( - mock__create_stack.return_value) - mock__delete_stack.assert_called_once_with( - mock__create_stack.return_value) - - @mock.patch("%s.CreateUpdateDeleteStack._delete_stack" % BASE) - @mock.patch("%s.CreateUpdateDeleteStack._update_stack" % BASE) - @mock.patch("%s.CreateUpdateDeleteStack._create_stack" % BASE) - @mock.patch("%s.CreateUpdateDeleteStack.generate_random_name" % BASE, - return_value="test-rally-stack") - def test_create_update_delete_stack(self, - mock_generate_random_name, - mock__create_stack, - mock__update_stack, - mock__delete_stack): - fake_stack = object() - mock__create_stack.return_value = fake_stack - stacks.CreateUpdateDeleteStack(self.context).run( - template_path=self.default_template, - parameters=self.default_parameters, - updated_template_path=self.default_template, - files=self.default_files, - environment=self.default_environment - ) - - mock__create_stack.assert_called_once_with( - self.default_template, - self.default_parameters, - self.default_files, - self.default_environment) - mock__update_stack.assert_called_once_with( - fake_stack, self.default_template, - self.default_parameters, - self.default_files, - self.default_environment) - mock__delete_stack.assert_called_once_with(fake_stack) - - def test_create_stack_and_scale(self): - heat_scenario = stacks.CreateStackAndScale(self.context) - stack = mock.Mock() - heat_scenario._create_stack = mock.Mock(return_value=stack) - heat_scenario._scale_stack = mock.Mock() - - heat_scenario.run( - self.default_template, "key", -1, - parameters=self.default_parameters, - files=self.default_files, - environment=self.default_environment) - - heat_scenario._create_stack.assert_called_once_with( - self.default_template, - self.default_parameters, - self.default_files, - self.default_environment) - heat_scenario._scale_stack.assert_called_once_with( - stack, "key", -1) - - @mock.patch("%s.CreateSuspendResumeDeleteStack._delete_stack" % BASE) - @mock.patch("%s.CreateSuspendResumeDeleteStack._resume_stack" % BASE) - @mock.patch("%s.CreateSuspendResumeDeleteStack._suspend_stack" % BASE) - @mock.patch("%s.CreateSuspendResumeDeleteStack._create_stack" % BASE) - def test_create_suspend_resume_delete_stack(self, - mock__create_stack, - mock__suspend_stack, - mock__resume_stack, - 
mock__delete_stack): - stacks.CreateSuspendResumeDeleteStack(self.context).run( - template_path=self.default_template, - parameters=self.default_parameters, - files=self.default_files, - environment=self.default_environment) - - mock__create_stack.assert_called_once_with( - self.default_template, - self.default_parameters, - self.default_files, - self.default_environment - ) - mock__suspend_stack.assert_called_once_with( - mock__create_stack.return_value) - mock__resume_stack.assert_called_once_with( - mock__create_stack.return_value) - mock__delete_stack.assert_called_once_with( - mock__create_stack.return_value) - - @mock.patch("%s.CreateSnapshotRestoreDeleteStack._delete_stack" % BASE) - @mock.patch("%s.CreateSnapshotRestoreDeleteStack._restore_stack" % BASE) - @mock.patch("%s.CreateSnapshotRestoreDeleteStack._snapshot_stack" % BASE, - return_value={"id": "dummy_id"}) - @mock.patch("%s.CreateSnapshotRestoreDeleteStack._create_stack" % BASE, - return_value=object()) - def test_create_snapshot_restore_delete_stack(self, - mock__create_stack, - mock__snapshot_stack, - mock__restore_stack, - mock__delete_stack): - - stacks.CreateSnapshotRestoreDeleteStack(self.context).run( - template_path=self.default_template, - parameters=self.default_parameters, - files=self.default_files, - environment=self.default_environment) - - mock__create_stack.assert_called_once_with( - self.default_template, self.default_parameters, - self.default_files, self.default_environment) - mock__snapshot_stack.assert_called_once_with( - mock__create_stack.return_value) - mock__restore_stack.assert_called_once_with( - mock__create_stack.return_value, "dummy_id") - mock__delete_stack.assert_called_once_with( - mock__create_stack.return_value) - - @mock.patch("%s.CreateStackAndShowOutputViaAPI" - "._stack_show_output_via_API" % BASE) - @mock.patch("%s.CreateStackAndShowOutputViaAPI._create_stack" % BASE) - def test_create_and_show_output_via_API(self, - mock__create_stack, - mock__stack_show_output_api): - stacks.CreateStackAndShowOutputViaAPI(self.context).run( - template_path=self.default_template, - output_key=self.default_output_key, - parameters=self.default_parameters, - files=self.default_files, - environment=self.default_environment) - - mock__create_stack.assert_called_once_with( - self.default_template, self.default_parameters, - self.default_files, self.default_environment) - mock__stack_show_output_api.assert_called_once_with( - mock__create_stack.return_value, self.default_output_key) - - @mock.patch("%s.CreateStackAndShowOutput._stack_show_output" % BASE) - @mock.patch("%s.CreateStackAndShowOutput._create_stack" % BASE) - def test_create_and_show_output(self, - mock__create_stack, - mock__stack_show_output): - stacks.CreateStackAndShowOutput(self.context).run( - template_path=self.default_template, - output_key=self.default_output_key, - parameters=self.default_parameters, - files=self.default_files, - environment=self.default_environment) - - mock__create_stack.assert_called_once_with( - self.default_template, self.default_parameters, - self.default_files, self.default_environment) - mock__stack_show_output.assert_called_once_with( - mock__create_stack.return_value, self.default_output_key) - - @mock.patch("%s.CreateStackAndListOutputViaAPI" - "._stack_list_output_via_API" % BASE) - @mock.patch("%s.CreateStackAndListOutputViaAPI._create_stack" % BASE) - def test_create_and_list_output_via_API(self, - mock__create_stack, - mock__stack_list_output_api): - 
stacks.CreateStackAndListOutputViaAPI(self.context).run( - template_path=self.default_template, - parameters=self.default_parameters, - files=self.default_files, - environment=self.default_environment) - - mock__create_stack.assert_called_once_with( - self.default_template, self.default_parameters, - self.default_files, self.default_environment) - mock__stack_list_output_api.assert_called_once_with( - mock__create_stack.return_value) - - @mock.patch("%s.CreateStackAndListOutput._stack_list_output" % BASE) - @mock.patch("%s.CreateStackAndListOutput._create_stack" % BASE) - def test_create_and_list_output(self, - mock__create_stack, - mock__stack_list_output): - stacks.CreateStackAndListOutput(self.context).run( - template_path=self.default_template, - parameters=self.default_parameters, - files=self.default_files, - environment=self.default_environment) - - mock__create_stack.assert_called_once_with( - self.default_template, self.default_parameters, - self.default_files, self.default_environment) - mock__stack_list_output.assert_called_once_with( - mock__create_stack.return_value) diff --git a/tests/unit/plugins/openstack/scenarios/heat/test_utils.py b/tests/unit/plugins/openstack/scenarios/heat/test_utils.py deleted file mode 100644 index 0bc7e30f8d..0000000000 --- a/tests/unit/plugins/openstack/scenarios/heat/test_utils.py +++ /dev/null @@ -1,307 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
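Most assertions in the Heat utility tests that follow check one thing: that a helper triggered the right client call and then invoked the shared wait-for-status routine with the ready/failure statuses, poll interval and timeout appropriate for that operation. A stripped-down sketch of that style of assertion; wait_for_status, delete_stack and the literal interval/timeout values here are assumptions for illustration, not Rally's real helpers or config defaults:

    import unittest
    from unittest import mock


    def wait_for_status(resource, ready_statuses, failure_statuses,
                        check_interval, timeout):
        """Stand-in for the polling helper the real tests replace with a mock."""
        raise NotImplementedError("replaced by a mock in tests")


    def delete_stack(stack, waiter=wait_for_status):
        """Toy helper: request deletion, then wait for the terminal status."""
        stack.delete()
        waiter(stack,
               ready_statuses=["DELETE_COMPLETE"],
               failure_statuses=["DELETE_FAILED", "ERROR"],
               check_interval=2,
               timeout=3600)


    class DeleteStackTestCase(unittest.TestCase):
        def test_delete_waits_with_expected_statuses(self):
            stack, waiter = mock.Mock(), mock.Mock()
            delete_stack(stack, waiter=waiter)
            stack.delete.assert_called_once_with()
            waiter.assert_called_once_with(
                stack,
                ready_statuses=["DELETE_COMPLETE"],
                failure_statuses=["DELETE_FAILED", "ERROR"],
                check_interval=2,
                timeout=3600)


    if __name__ == "__main__":
        unittest.main()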
- -import mock - -from rally import exceptions -from rally.plugins.openstack.scenarios.heat import utils -from tests.unit import test - -HEAT_UTILS = "rally.plugins.openstack.scenarios.heat.utils" - -CONF = utils.CONF - - -class HeatScenarioTestCase(test.ScenarioTestCase): - def setUp(self): - super(HeatScenarioTestCase, self).setUp() - self.stack = mock.Mock() - self.scenario = utils.HeatScenario(self.context) - self.default_template = "heat_template_version: 2013-05-23" - self.dummy_parameters = {"dummy_param": "dummy_key"} - self.dummy_files = ["dummy_file.yaml"] - self.dummy_environment = {"dummy_env": "dummy_env_value"} - self.default_output_key = "dummy_output_key" - - def test_list_stacks(self): - scenario = utils.HeatScenario(self.context) - return_stacks_list = scenario._list_stacks() - self.clients("heat").stacks.list.assert_called_once_with() - self.assertEqual(list(self.clients("heat").stacks.list.return_value), - return_stacks_list) - self._test_atomic_action_timer(scenario.atomic_actions(), - "heat.list_stacks") - - def test_create_stack(self): - self.clients("heat").stacks.create.return_value = { - "stack": {"id": "test_id"} - } - self.clients("heat").stacks.get.return_value = self.stack - return_stack = self.scenario._create_stack(self.default_template, - self.dummy_parameters, - self.dummy_files, - self.dummy_environment) - args, kwargs = self.clients("heat").stacks.create.call_args - self.assertIn(self.dummy_parameters, kwargs.values()) - self.assertIn(self.default_template, kwargs.values()) - self.assertIn(self.dummy_files, kwargs.values()) - self.assertIn(self.dummy_environment, kwargs.values()) - self.mock_wait_for_status.mock.assert_called_once_with( - self.stack, - update_resource=self.mock_get_from_manager.mock.return_value, - ready_statuses=["CREATE_COMPLETE"], - failure_statuses=["CREATE_FAILED", "ERROR"], - check_interval=CONF.openstack.heat_stack_create_poll_interval, - timeout=CONF.openstack.heat_stack_create_timeout) - self.mock_get_from_manager.mock.assert_called_once_with() - self.assertEqual(self.mock_wait_for_status.mock.return_value, - return_stack) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "heat.create_stack") - - def test_update_stack(self): - self.clients("heat").stacks.update.return_value = None - scenario = utils.HeatScenario(self.context) - scenario._update_stack(self.stack, self.default_template, - self.dummy_parameters, self.dummy_files, - self.dummy_environment) - args, kwargs = self.clients("heat").stacks.update.call_args - self.assertIn(self.dummy_parameters, kwargs.values()) - self.assertIn(self.default_template, kwargs.values()) - self.assertIn(self.dummy_files, kwargs.values()) - self.assertIn(self.dummy_environment, kwargs.values()) - self.assertIn(self.stack.id, args) - self.mock_wait_for_status.mock.assert_called_once_with( - self.stack, - update_resource=self.mock_get_from_manager.mock.return_value, - ready_statuses=["UPDATE_COMPLETE"], - failure_statuses=["UPDATE_FAILED", "ERROR"], - check_interval=CONF.openstack.heat_stack_update_poll_interval, - timeout=CONF.openstack.heat_stack_update_timeout) - self.mock_get_from_manager.mock.assert_called_once_with() - self._test_atomic_action_timer(scenario.atomic_actions(), - "heat.update_stack") - - def test_check_stack(self): - scenario = utils.HeatScenario(self.context) - scenario._check_stack(self.stack) - self.clients("heat").actions.check.assert_called_once_with( - self.stack.id) - self.mock_wait_for_status.mock.assert_called_once_with( - self.stack, - 
update_resource=self.mock_get_from_manager.mock.return_value, - ready_statuses=["CHECK_COMPLETE"], - failure_statuses=["CHECK_FAILED", "ERROR"], - check_interval=CONF.openstack.heat_stack_check_poll_interval, - timeout=CONF.openstack.heat_stack_check_timeout) - self._test_atomic_action_timer(scenario.atomic_actions(), - "heat.check_stack") - - def test_delete_stack(self): - scenario = utils.HeatScenario(self.context) - scenario._delete_stack(self.stack) - self.stack.delete.assert_called_once_with() - self.mock_wait_for_status.mock.assert_called_once_with( - self.stack, - ready_statuses=["DELETE_COMPLETE"], - failure_statuses=["DELETE_FAILED", "ERROR"], - check_deletion=True, - update_resource=self.mock_get_from_manager.mock.return_value, - check_interval=CONF.openstack.heat_stack_delete_poll_interval, - timeout=CONF.openstack.heat_stack_delete_timeout) - self.mock_get_from_manager.mock.assert_called_once_with() - self._test_atomic_action_timer(scenario.atomic_actions(), - "heat.delete_stack") - - def test_suspend_stack(self): - scenario = utils.HeatScenario(self.context) - scenario._suspend_stack(self.stack) - self.clients("heat").actions.suspend.assert_called_once_with( - self.stack.id) - self.mock_wait_for_status.mock.assert_called_once_with( - self.stack, - update_resource=self.mock_get_from_manager.mock.return_value, - ready_statuses=["SUSPEND_COMPLETE"], - failure_statuses=["SUSPEND_FAILED", "ERROR"], - check_interval=CONF.openstack.heat_stack_suspend_poll_interval, - timeout=CONF.openstack.heat_stack_suspend_timeout) - self.mock_get_from_manager.mock.assert_called_once_with() - self._test_atomic_action_timer(scenario.atomic_actions(), - "heat.suspend_stack") - - def test_resume_stack(self): - scenario = utils.HeatScenario(self.context) - scenario._resume_stack(self.stack) - self.clients("heat").actions.resume.assert_called_once_with( - self.stack.id) - self.mock_wait_for_status.mock.assert_called_once_with( - self.stack, - update_resource=self.mock_get_from_manager.mock.return_value, - ready_statuses=["RESUME_COMPLETE"], - failure_statuses=["RESUME_FAILED", "ERROR"], - check_interval=CONF.openstack.heat_stack_resume_poll_interval, - timeout=CONF.openstack.heat_stack_resume_timeout) - self.mock_get_from_manager.mock.assert_called_once_with() - self._test_atomic_action_timer(scenario.atomic_actions(), - "heat.resume_stack") - - def test_snapshot_stack(self): - scenario = utils.HeatScenario(self.context) - scenario._snapshot_stack(self.stack) - self.clients("heat").stacks.snapshot.assert_called_once_with( - self.stack.id) - self.mock_wait_for_status.mock.assert_called_once_with( - self.stack, - update_resource=self.mock_get_from_manager.mock.return_value, - ready_statuses=["SNAPSHOT_COMPLETE"], - failure_statuses=["SNAPSHOT_FAILED", "ERROR"], - check_interval=CONF.openstack.heat_stack_snapshot_poll_interval, - timeout=CONF.openstack.heat_stack_snapshot_timeout) - self.mock_get_from_manager.mock.assert_called_once_with() - self._test_atomic_action_timer(scenario.atomic_actions(), - "heat.snapshot_stack") - - def test_restore_stack(self): - scenario = utils.HeatScenario(self.context) - scenario._restore_stack(self.stack, "dummy_id") - self.clients("heat").stacks.restore.assert_called_once_with( - self.stack.id, "dummy_id") - self.mock_wait_for_status.mock.assert_called_once_with( - self.stack, - update_resource=self.mock_get_from_manager.mock.return_value, - ready_statuses=["RESTORE_COMPLETE"], - failure_statuses=["RESTORE_FAILED", "ERROR"], - 
check_interval=CONF.openstack.heat_stack_restore_poll_interval, - timeout=CONF.openstack.heat_stack_restore_timeout) - self.mock_get_from_manager.mock.assert_called_once_with() - self._test_atomic_action_timer(scenario.atomic_actions(), - "heat.restore_stack") - - def test__count_instances(self): - self.clients("heat").resources.list.return_value = [ - mock.Mock(resource_type="OS::Nova::Server"), - mock.Mock(resource_type="OS::Nova::Server"), - mock.Mock(resource_type="OS::Heat::AutoScalingGroup")] - scenario = utils.HeatScenario(self.context) - self.assertEqual(scenario._count_instances(self.stack), 2) - self.clients("heat").resources.list.assert_called_once_with( - self.stack.id, - nested_depth=1) - - def test__scale_stack(self): - scenario = utils.HeatScenario(self.context) - scenario._count_instances = mock.Mock(side_effect=[3, 3, 2]) - scenario._stack_webhook = mock.Mock() - - scenario._scale_stack(self.stack, "test_output_key", -1) - - scenario._stack_webhook.assert_called_once_with(self.stack, - "test_output_key") - self.mock_wait_for.mock.assert_called_once_with( - self.stack, - is_ready=mock.ANY, - failure_statuses=["UPDATE_FAILED", "ERROR"], - update_resource=self.mock_get_from_manager.mock.return_value, - timeout=CONF.openstack.heat_stack_scale_timeout, - check_interval=CONF.openstack.heat_stack_scale_poll_interval) - self.mock_get_from_manager.mock.assert_called_once_with() - - self._test_atomic_action_timer(scenario.atomic_actions(), - "heat.scale_with_test_output_key") - - @mock.patch("requests.post") - def test_stack_webhook(self, mock_post): - scenario = utils.HeatScenario(self.context) - stack = mock.Mock(outputs=[ - {"output_key": "output1", "output_value": "url1"}, - {"output_key": "output2", "output_value": "url2"}]) - - scenario._stack_webhook(stack, "output1") - mock_post.assert_called_with("url1") - self._test_atomic_action_timer(scenario.atomic_actions(), - "heat.output1_webhook") - - @mock.patch("requests.post") - def test_stack_webhook_invalid_output_key(self, mock_post): - scenario = utils.HeatScenario(self.context) - stack = mock.Mock() - stack.outputs = [{"output_key": "output1", "output_value": "url1"}, - {"output_key": "output2", "output_value": "url2"}] - - self.assertRaises(exceptions.InvalidConfigException, - scenario._stack_webhook, stack, "bogus") - - def test_stack_show_output(self): - scenario = utils.HeatScenario(self.context) - scenario._stack_show_output(self.stack, self.default_output_key) - self.clients("heat").stacks.output_show.assert_called_once_with( - self.stack.id, self.default_output_key) - self._test_atomic_action_timer(scenario.atomic_actions(), - "heat.show_output") - - def test_stack_show_output_via_API(self): - scenario = utils.HeatScenario(self.context) - scenario._stack_show_output_via_API( - self.stack, self.default_output_key) - self.clients("heat").stacks.get.assert_called_once_with( - stack_id=self.stack.id) - self._test_atomic_action_timer(scenario.atomic_actions(), - "heat.show_output_via_API") - - def test_stack_list_output(self): - scenario = utils.HeatScenario(self.context) - scenario._stack_list_output(self.stack) - self.clients("heat").stacks.output_list.assert_called_once_with( - self.stack.id) - self._test_atomic_action_timer(scenario.atomic_actions(), - "heat.list_output") - - def test_stack_list_output_via_API(self): - scenario = utils.HeatScenario(self.context) - scenario._stack_list_output_via_API(self.stack) - self.clients("heat").stacks.get.assert_called_once_with( - stack_id=self.stack.id) - 
self._test_atomic_action_timer(scenario.atomic_actions(), - "heat.list_output_via_API") - - -class HeatScenarioNegativeTestCase(test.ScenarioTestCase): - patch_task_utils = False - - def test_failed_create_stack(self): - self.clients("heat").stacks.create.return_value = { - "stack": {"id": "test_id"} - } - stack = mock.Mock() - resource = mock.Mock() - resource.stack_status = "CREATE_FAILED" - stack.manager.get.return_value = resource - self.clients("heat").stacks.get.return_value = stack - scenario = utils.HeatScenario(context=self.context) - ex = self.assertRaises(exceptions.GetResourceErrorStatus, - scenario._create_stack, "stack_name") - self.assertIn("has CREATE_FAILED status", str(ex)) - - def test_failed_update_stack(self): - stack = mock.Mock() - resource = mock.Mock() - resource.stack_status = "UPDATE_FAILED" - stack.manager.get.return_value = resource - self.clients("heat").stacks.get.return_value = stack - scenario = utils.HeatScenario(context=self.context) - ex = self.assertRaises(exceptions.GetResourceErrorStatus, - scenario._update_stack, stack, - "heat_template_version: 2013-05-23") - self.assertIn("has UPDATE_FAILED status", str(ex)) diff --git a/tests/unit/plugins/openstack/scenarios/ironic/__init__.py b/tests/unit/plugins/openstack/scenarios/ironic/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/scenarios/ironic/test_nodes.py b/tests/unit/plugins/openstack/scenarios/ironic/test_nodes.py deleted file mode 100644 index 76096525c5..0000000000 --- a/tests/unit/plugins/openstack/scenarios/ironic/test_nodes.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
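Several scenarios in this diff (Glance images, Heat stacks, Keystone roles, and the Ironic nodes tests below) pair a positive run with a negative case that expects an assertion error when the created resource does not show up in the subsequent listing. A compact sketch of that create-then-verify-listing pattern, with a plain AssertionError standing in for Rally's RallyAssertionError; note that .name is assigned after constructing each Mock because name is a reserved keyword argument of mock.Mock():

    import unittest
    from unittest import mock


    def create_and_list_node(service, driver, **kwargs):
        """Toy scenario: create a node, then require it to appear in list()."""
        node = service.create_node(driver, **kwargs)
        listed_names = [n.name for n in service.list_nodes()]
        if node.name not in listed_names:
            raise AssertionError("created node %r is not listed" % node.name)


    class CreateAndListNodeTestCase(unittest.TestCase):
        def test_positive_and_negative_cases(self):
            service = mock.Mock()
            listed = mock.Mock()
            listed.name = "node-1"   # not Mock(name=...): that kwarg is reserved
            service.list_nodes.return_value = [listed]

            # Positive case: the created node appears in the listing.
            created = mock.Mock()
            created.name = "node-1"
            service.create_node.return_value = created
            create_and_list_node(service, "fake-driver")

            # Negative case: a different node was created, so the check fails.
            other = mock.Mock()
            other.name = "node-2"
            service.create_node.return_value = other
            self.assertRaises(AssertionError,
                              create_and_list_node, service, "fake-driver")


    if __name__ == "__main__":
        unittest.main()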
- -import mock - -from rally import exceptions -from rally.plugins.openstack.scenarios.ironic import nodes -from tests.unit import test - - -class IronicNodesTestCase(test.ScenarioTestCase): - - def test_create_and_list_node(self): - class Node(object): - def __init__(self, name): - self.name = name - - scenario = nodes.CreateAndListNode(self.context) - scenario._create_node = mock.Mock(return_value=Node("node_obj1")) - scenario._list_nodes = mock.Mock( - return_value=[Node(name) - for name in ("node_obj1", "node_obj2", "node_obj3")]) - driver = "foo" - properties = "fake_prop" - fake_params = { - "sort_dir": "foo1", - "associated": "foo2", - "detail": True, - "maintenance": "foo5", - "fake_parameter1": "foo7" - } - - # Positive case: - scenario.run(driver, properties, **fake_params) - - scenario._create_node.assert_called_once_with(driver, properties, - fake_parameter1="foo7") - scenario._list_nodes.assert_called_once_with( - sort_dir="foo1", associated="foo2", detail=True, - maintenance="foo5") - - # Negative case: created node not in the list of available nodes - scenario._create_node = mock.Mock(uuid="foooo") - self.assertRaises(exceptions.RallyAssertionError, - scenario.run, driver, properties, **fake_params) - - scenario._create_node.assert_called_with(driver, properties, - fake_parameter1="foo7") - scenario._list_nodes.assert_called_with( - sort_dir="foo1", associated="foo2", detail=True, - maintenance="foo5") - - def test_create_and_delete_node(self): - fake_node = mock.Mock(uuid="fake_uuid") - scenario = nodes.CreateAndDeleteNode(self.context) - scenario._create_node = mock.Mock(return_value=fake_node) - scenario._delete_node = mock.Mock() - - driver = "fake" - properties = "fake_prop" - - scenario.run(driver, properties, fake_parameter1="fake1", - fake_parameter2="fake2") - scenario._create_node.assert_called_once_with( - driver, properties, fake_parameter1="fake1", - fake_parameter2="fake2") - - scenario._delete_node.assert_called_once_with( - scenario._create_node.return_value) diff --git a/tests/unit/plugins/openstack/scenarios/ironic/test_utils.py b/tests/unit/plugins/openstack/scenarios/ironic/test_utils.py deleted file mode 100644 index 609fe9fed8..0000000000 --- a/tests/unit/plugins/openstack/scenarios/ironic/test_utils.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from rally.plugins.openstack.scenarios.ironic import utils -from tests.unit import test - -IRONIC_UTILS = "rally.plugins.openstack.scenarios.ironic.utils" - - -class IronicScenarioTestCase(test.ScenarioTestCase): - - @mock.patch("%s.utils.wait_for_status" % IRONIC_UTILS) - def test__create_node(self, mock_wait_for_status): - self.admin_clients("ironic").node.create.return_value = "fake_node" - scenario = utils.IronicScenario(self.context) - scenario.generate_random_name = mock.Mock() - - scenario._create_node(driver="fake", properties="fake_prop", - fake_param="foo") - - self.admin_clients("ironic").node.create.assert_called_once_with( - driver="fake", properties="fake_prop", fake_param="foo", - name=scenario.generate_random_name.return_value) - self.assertTrue(mock_wait_for_status.called) - self._test_atomic_action_timer(scenario.atomic_actions(), - "ironic.create_node") - - @mock.patch("%s.utils.wait_for_status" % IRONIC_UTILS) - def test__delete_node(self, mock_wait_for_status): - mock_node_delete = mock.Mock() - self.admin_clients("ironic").node.delete = mock_node_delete - scenario = utils.IronicScenario(self.context) - scenario._delete_node(mock.Mock(uuid="fake_id")) - self.assertTrue(mock_wait_for_status.called) - - self.admin_clients("ironic").node.delete.assert_called_once_with( - "fake_id") - self._test_atomic_action_timer(scenario.atomic_actions(), - "ironic.delete_node") - - def test__list_nodes(self): - self.admin_clients("ironic").node.list.return_value = ["fake"] - scenario = utils.IronicScenario(self.context) - fake_params = { - "sort_dir": "foo1", - "associated": "foo2", - "detail": True, - "maintenance": "foo5" - } - return_nodes_list = scenario._list_nodes(**fake_params) - self.assertEqual(["fake"], return_nodes_list) - self.admin_clients("ironic").node.list.assert_called_once_with( - sort_dir="foo1", associated="foo2", detail=True, - maintenance="foo5") - self._test_atomic_action_timer(scenario.atomic_actions(), - "ironic.list_nodes") diff --git a/tests/unit/plugins/openstack/scenarios/keystone/__init__.py b/tests/unit/plugins/openstack/scenarios/keystone/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/scenarios/keystone/test_basic.py b/tests/unit/plugins/openstack/scenarios/keystone/test_basic.py deleted file mode 100755 index fe1f9b9b6d..0000000000 --- a/tests/unit/plugins/openstack/scenarios/keystone/test_basic.py +++ /dev/null @@ -1,383 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
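The Keystone scenario tests below lean on one recurring idiom: mock.patch() the Identity service class in setUp(), start the patcher immediately, and register patch.stop with addCleanup() so every test method gets a fresh mock and the patch is undone even when a test fails. A self-contained sketch of that idiom; the Identity class and create_user_scenario function defined here are placeholders for illustration, whereas the real tests patch rally.plugins.openstack.services.identity.identity.Identity:

    import unittest
    from unittest import mock


    class Identity(object):
        """Placeholder service class; real tests patch Rally's Identity."""

        def create_user(self, **kwargs):
            raise RuntimeError("must never run inside a unit test")


    def create_user_scenario(password, project_id):
        """Toy scenario body: create a user through the service layer."""
        return Identity().create_user(password=password, project_id=project_id)


    class CreateUserTestCase(unittest.TestCase):
        def setUp(self):
            patcher = mock.patch("%s.Identity" % __name__)
            self.addCleanup(patcher.stop)        # undone even if the test fails
            self.mock_identity = patcher.start()

        def test_create_user(self):
            create_user_scenario(password="tttt", project_id="id")
            self.mock_identity.return_value.create_user.assert_called_once_with(
                password="tttt", project_id="id")


    if __name__ == "__main__":
        unittest.main()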
- -import ddt -import mock - -from rally import exceptions -from rally.plugins.openstack.scenarios.keystone import basic -from tests.unit import test - - -@ddt.ddt -class KeystoneBasicTestCase(test.ScenarioTestCase): - - def get_test_context(self): - context = super(KeystoneBasicTestCase, self).get_test_context() - context.update({ - "admin": { - "id": "fake_user_id", - "credential": mock.MagicMock() - }, - "user": { - "id": "fake_user_id", - "credential": mock.MagicMock() - }, - "tenant": {"id": "fake_tenant_id", - "name": "fake_tenant_name"} - }) - return context - - def setUp(self): - super(KeystoneBasicTestCase, self).setUp() - patch = mock.patch( - "rally.plugins.openstack.services.identity.identity.Identity") - self.addCleanup(patch.stop) - self.mock_identity = patch.start() - - def test_create_user(self): - scenario = basic.CreateUser(self.context) - - scenario.run(password="tttt", project_id="id") - self.mock_identity.return_value.create_user.assert_called_once_with( - password="tttt", project_id="id") - - def test_create_delete_user(self): - identity_service = self.mock_identity.return_value - - fake_email = "abcd" - fake_user = identity_service.create_user.return_value - - scenario = basic.CreateDeleteUser(self.context) - - scenario.run(email=fake_email, enabled=True) - - identity_service.create_user.assert_called_once_with( - email=fake_email, enabled=True) - identity_service.delete_user.assert_called_once_with(fake_user.id) - - def test_create_user_set_enabled_and_delete(self): - identity_service = self.mock_identity.return_value - - scenario = basic.CreateUserSetEnabledAndDelete(self.context) - - fake_email = "abcd" - fake_user = identity_service.create_user.return_value - scenario.run(enabled=True, email=fake_email) - - identity_service.create_user.assert_called_once_with( - email=fake_email, enabled=True) - identity_service.update_user.assert_called_once_with( - fake_user.id, enabled=False) - identity_service.delete_user.assert_called_once_with(fake_user.id) - - def test_user_authenticate_and_validate_token(self): - identity_service = self.mock_identity.return_value - scenario = basic.AuthenticateUserAndValidateToken(self.context) - - fake_token = identity_service.fetch_token.return_value - - scenario.run() - - identity_service.fetch_token.assert_called_once_with() - identity_service.validate_token.assert_called_once_with(fake_token) - - def test_create_tenant(self): - scenario = basic.CreateTenant(self.context) - - scenario.run(enabled=True) - - self.mock_identity.return_value.create_project.assert_called_once_with( - enabled=True) - - def test_create_tenant_with_users(self): - identity_service = self.mock_identity.return_value - - fake_project = identity_service.create_project.return_value - number_of_users = 1 - - scenario = basic.CreateTenantWithUsers(self.context) - - scenario.run(users_per_tenant=number_of_users, enabled=True) - - identity_service.create_project.assert_called_once_with(enabled=True) - identity_service.create_users.assert_called_once_with( - fake_project.id, number_of_users=number_of_users) - - def test_create_and_list_users(self): - scenario = basic.CreateAndListUsers(self.context) - - passwd = "tttt" - project_id = "id" - - scenario.run(password=passwd, project_id=project_id) - self.mock_identity.return_value.create_user.assert_called_once_with( - password=passwd, project_id=project_id) - self.mock_identity.return_value.list_users.assert_called_once_with() - - def test_create_and_list_tenants(self): - identity_service = self.mock_identity.return_value 
- scenario = basic.CreateAndListTenants(self.context) - scenario.run(enabled=True) - identity_service.create_project.assert_called_once_with(enabled=True) - identity_service.list_projects.assert_called_once_with() - - def test_assign_and_remove_user_role(self): - fake_tenant = self.context["tenant"]["id"] - fake_user = self.context["user"]["id"] - fake_role = mock.MagicMock() - - self.mock_identity.return_value.create_role.return_value = fake_role - - scenario = basic.AddAndRemoveUserRole(self.context) - scenario.run() - - self.mock_identity.return_value.create_role.assert_called_once_with() - self.mock_identity.return_value.add_role.assert_called_once_with( - role_id=fake_role.id, user_id=fake_user, project_id=fake_tenant) - - self.mock_identity.return_value.revoke_role.assert_called_once_with( - fake_role.id, user_id=fake_user, project_id=fake_tenant) - - def test_create_and_delete_role(self): - fake_role = mock.MagicMock() - self.mock_identity.return_value.create_role.return_value = fake_role - - scenario = basic.CreateAndDeleteRole(self.context) - scenario.run() - - self.mock_identity.return_value.create_role.assert_called_once_with() - self.mock_identity.return_value.delete_role.assert_called_once_with( - fake_role.id) - - def test_create_and_get_role(self): - fake_role = mock.MagicMock() - self.mock_identity.return_value.create_role.return_value = fake_role - - scenario = basic.CreateAndGetRole(self.context) - scenario.run() - - self.mock_identity.return_value.create_role.assert_called_once_with() - self.mock_identity.return_value.get_role.assert_called_once_with( - fake_role.id) - - def test_create_and_list_user_roles(self): - scenario = basic.CreateAddAndListUserRoles(self.context) - fake_tenant = self.context["tenant"]["id"] - fake_user = self.context["user"]["id"] - fake_role = mock.MagicMock() - self.mock_identity.return_value.create_role.return_value = fake_role - - scenario.run() - - self.mock_identity.return_value.create_role.assert_called_once_with() - self.mock_identity.return_value.add_role.assert_called_once_with( - user_id=fake_user, role_id=fake_role.id, project_id=fake_tenant) - self.mock_identity.return_value.list_roles.assert_called_once_with( - user_id=fake_user, project_id=fake_tenant) - - def test_create_and_list_roles(self): - # Positive case - scenario = basic.CreateAddListRoles(self.context) - create_kwargs = {"fakewargs": "name"} - list_kwargs = {"fakewargs": "f"} - self.mock_identity.return_value.create_role = mock.Mock( - return_value="role1") - self.mock_identity.return_value.list_roles = mock.Mock( - return_value=("role1", "role2")) - scenario.run(create_role_kwargs=create_kwargs, - list_role_kwargs=list_kwargs) - self.mock_identity.return_value.create_role.assert_called_once_with( - **create_kwargs) - self.mock_identity.return_value.list_roles.assert_called_once_with( - **list_kwargs) - - # Negative case 1: role isn't created - self.mock_identity.return_value.create_role.return_value = None - self.assertRaises(exceptions.RallyAssertionError, - scenario.run, create_role_kwargs=create_kwargs, - list_role_kwargs=list_kwargs) - self.mock_identity.return_value.create_role.assert_called_with( - **create_kwargs) - - # Negative case 2: role was created but included into list - self.mock_identity.return_value.create_role.return_value = "role3" - self.assertRaises(exceptions.RallyAssertionError, - scenario.run, create_role_kwargs=create_kwargs, - list_role_kwargs=list_kwargs) - self.mock_identity.return_value.create_role.assert_called_with( - **create_kwargs) - 
self.mock_identity.return_value.list_roles.assert_called_with( - **list_kwargs) - - @ddt.data(None, "keystone", "fooservice") - def test_get_entities(self, service_name): - identity_service = self.mock_identity.return_value - - fake_project = identity_service.create_project.return_value - fake_user = identity_service.create_user.return_value - fake_role = identity_service.create_role.return_value - fake_service = identity_service.create_service.return_value - - scenario = basic.GetEntities(self.context) - - scenario.run(service_name) - - identity_service.create_project.assert_called_once_with() - identity_service.create_user.assert_called_once_with( - project_id=fake_project.id) - identity_service.create_role.assert_called_once_with() - - identity_service.get_project.assert_called_once_with(fake_project.id) - identity_service.get_user.assert_called_once_with(fake_user.id) - identity_service.get_role.assert_called_once_with(fake_role.id) - - if service_name is None: - identity_service.create_service.assert_called_once_with() - self.assertFalse(identity_service.get_service_by_name.called) - identity_service.get_service.assert_called_once_with( - fake_service.id) - else: - identity_service.get_service_by_name.assert_called_once_with( - service_name) - self.assertFalse(identity_service.create_service.called) - identity_service.get_service.assert_called_once_with( - identity_service.get_service_by_name.return_value.id) - - def test_create_and_delete_service(self): - identity_service = self.mock_identity.return_value - scenario = basic.CreateAndDeleteService(self.context) - - service_type = "test_service_type" - description = "test_description" - fake_service = identity_service.create_service.return_value - - scenario.run(service_type=service_type, description=description) - - identity_service.create_service.assert_called_once_with( - service_type=service_type, description=description) - identity_service.delete_service.assert_called_once_with( - fake_service.id) - - def test_create_update_and_delete_tenant(self): - identity_service = self.mock_identity.return_value - - scenario = basic.CreateUpdateAndDeleteTenant(self.context) - - gen_name = mock.MagicMock() - basic.CreateUpdateAndDeleteTenant.generate_random_name = gen_name - fake_project = identity_service.create_project.return_value - - scenario.run() - - identity_service.create_project.assert_called_once_with() - identity_service.update_project.assert_called_once_with( - fake_project.id, description=gen_name.return_value, - name=gen_name.return_value) - identity_service.delete_project(fake_project.id) - - def test_create_user_update_password(self): - identity_service = self.mock_identity.return_value - - scenario = basic.CreateUserUpdatePassword(self.context) - - fake_password = "pswd" - fake_user = identity_service.create_user.return_value - scenario.generate_random_name = mock.MagicMock( - return_value=fake_password) - - scenario.run() - - scenario.generate_random_name.assert_called_once_with() - identity_service.create_user.assert_called_once_with() - identity_service.update_user.assert_called_once_with( - fake_user.id, password=fake_password) - - def test_create_and_update_user(self): - identity_service = self.mock_identity.return_value - - scenario = basic.CreateAndUpdateUser(self.context) - scenario.admin_clients("keystone").users.get = mock.MagicMock() - fake_user = identity_service.create_user.return_value - - create_args = {"fakearg1": "f"} - update_args = {"fakearg1": "fakearg"} - 
setattr(self.admin_clients("keystone").users.get.return_value, - "fakearg1", "fakearg") - - scenario.run(create_user_kwargs=create_args, - update_user_kwargs=update_args) - - identity_service.create_user.assert_called_once_with(**create_args) - identity_service.update_user.assert_called_once_with( - fake_user.id, **update_args) - - def test_create_and_list_services(self): - identity_service = self.mock_identity.return_value - - scenario = basic.CreateAndListServices(self.context) - service_type = "test_service_type" - description = "test_description" - - scenario.run(service_type=service_type, description=description) - - identity_service.create_service.assert_called_once_with( - service_type=service_type, description=description) - identity_service.list_services.assert_called_once_with() - - def test_create_and_list_ec2credentials(self): - identity_service = self.mock_identity.return_value - - scenario = basic.CreateAndListEc2Credentials(self.context) - - scenario.run() - - identity_service.create_ec2credentials.assert_called_once_with( - self.context["user"]["id"], - project_id=self.context["tenant"]["id"]) - identity_service.list_ec2credentials.assert_called_with( - self.context["user"]["id"]) - - def test_create_and_delete_ec2credential(self): - identity_service = self.mock_identity.return_value - - fake_creds = identity_service.create_ec2credentials.return_value - - scenario = basic.CreateAndDeleteEc2Credential(self.context) - - scenario.run() - - identity_service.create_ec2credentials.assert_called_once_with( - self.context["user"]["id"], - project_id=self.context["tenant"]["id"]) - identity_service.delete_ec2credential.assert_called_once_with( - self.context["user"]["id"], access=fake_creds.access) - - def test_add_and_remove_user_role(self): - context = self.context - tenant_id = context["tenant"]["id"] - user_id = context["user"]["id"] - - fake_role = mock.MagicMock() - self.mock_identity.return_value.create_role.return_value = fake_role - - scenario = basic.AddAndRemoveUserRole(context) - scenario.run() - - self.mock_identity.return_value.create_role.assert_called_once_with() - self.mock_identity.return_value.add_role.assert_called_once_with( - role_id=fake_role.id, user_id=user_id, project_id=tenant_id) - self.mock_identity.return_value.revoke_role.assert_called_once_with( - fake_role.id, user_id=user_id, project_id=tenant_id) diff --git a/tests/unit/plugins/openstack/scenarios/keystone/test_utils.py b/tests/unit/plugins/openstack/scenarios/keystone/test_utils.py deleted file mode 100644 index b0c10baa62..0000000000 --- a/tests/unit/plugins/openstack/scenarios/keystone/test_utils.py +++ /dev/null @@ -1,372 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import mock - -from rally.plugins.openstack.scenarios.keystone import utils -from tests.unit import fakes -from tests.unit import test - -UTILS = "rally.plugins.openstack.scenarios.keystone.utils." 
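The test class that follows (like the Glance utils tests earlier in this diff) uses the ddt library to run a single test body against several input dictionaries. A minimal reminder of how that decorator pair behaves, independent of Rally; it requires ddt installed, and the test body is an assumption chosen only to mirror the default-name handling seen in those tests:

    import unittest

    import ddt


    @ddt.ddt
    class DefaultNameTestCase(unittest.TestCase):

        # Each value passed to @ddt.data becomes its own generated test
        # method, handed to the body as one positional argument.
        @ddt.data({}, {"name": "foo"}, {"name": None})
        def test_name_default_is_applied(self, create_args):
            expected = dict(create_args)
            if not expected.get("name"):
                expected["name"] = "generated-name"
            self.assertTrue(expected["name"])


    if __name__ == "__main__":
        unittest.main()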
- - -@ddt.ddt -class KeystoneScenarioTestCase(test.ScenarioTestCase): - - @mock.patch("uuid.uuid4", return_value="pwd") - def test_user_create(self, mock_uuid4): - scenario = utils.KeystoneScenario(self.context) - scenario.generate_random_name = mock.Mock(return_value="foobarov") - result = scenario._user_create() - - self.assertEqual( - self.admin_clients("keystone").users.create.return_value, result) - self.admin_clients("keystone").users.create.assert_called_once_with( - "foobarov", - password=mock_uuid4.return_value, - email="foobarov@rally.me") - mock_uuid4.assert_called_with() - self._test_atomic_action_timer(scenario.atomic_actions(), - "keystone.create_user") - - def test_update_user_enabled(self): - user = mock.Mock() - enabled = mock.Mock() - scenario = utils.KeystoneScenario(self.context) - - scenario._update_user_enabled(user, enabled) - self.admin_clients( - "keystone").users.update_enabled.assert_called_once_with(user, - enabled) - - self._test_atomic_action_timer(scenario.atomic_actions(), - "keystone.update_user_enabled") - - def test_token_validate(self): - token = mock.MagicMock() - scenario = utils.KeystoneScenario(self.context) - - scenario._token_validate(token) - self.admin_clients( - "keystone").tokens.validate.assert_called_once_with(token) - - self._test_atomic_action_timer(scenario.atomic_actions(), - "keystone.validate_token") - - def test_token_authenticate(self): - name = mock.MagicMock() - psswd = "foopsswd" - tenant_id = mock.MagicMock() - tenant_name = mock.MagicMock() - - scenario = utils.KeystoneScenario(self.context) - scenario._authenticate_token(name, psswd, tenant_id, tenant_name) - self.admin_clients( - "keystone").tokens.authenticate.assert_called_once_with( - name, tenant_id, tenant_name, "foopsswd") - - self._test_atomic_action_timer(scenario.atomic_actions(), - "keystone.token_authenticate") - - @mock.patch("rally.plugins.openstack.wrappers.keystone.wrap") - def test_role_create(self, mock_wrap, **kwargs): - role = mock.MagicMock() - mock_wrap.return_value.create_role.return_value = role - scenario = utils.KeystoneScenario(self.context) - scenario.generate_random_name = mock.MagicMock() - return_role = scenario._role_create(**kwargs) - - self.assertEqual(role, return_role) - - mock_wrap.assert_called_once_with(scenario.admin_clients("keystone")) - mock_wrap.return_value.create_role.assert_called_once_with( - scenario.generate_random_name.return_value, **kwargs) - self._test_atomic_action_timer(scenario.atomic_actions(), - "keystone.create_role") - - @mock.patch("rally.plugins.openstack.wrappers.keystone.wrap") - def test_role_delete(self, mock_wrap): - role = mock.MagicMock() - scenario = utils.KeystoneScenario(self.context) - scenario._role_delete(role.id) - - mock_wrap.assert_called_once_with(scenario.admin_clients("keystone")) - mock_wrap.return_value.delete_role.assert_called_once_with(role.id) - self._test_atomic_action_timer(scenario.atomic_actions(), - "keystone.role_delete") - - def test_list_roles_for_user(self): - user = mock.MagicMock() - tenant = mock.MagicMock() - scenario = utils.KeystoneScenario(self.context) - - scenario._list_roles_for_user(user, tenant) - - self.admin_clients( - "keystone").roles.roles_for_user.assert_called_once_with(user, - tenant) - self._test_atomic_action_timer(scenario.atomic_actions(), - "keystone.list_roles") - - def test_role_add(self): - user = mock.MagicMock() - role = mock.MagicMock() - tenant = mock.MagicMock() - scenario = utils.KeystoneScenario(self.context) - - scenario._role_add(user=user.id, 
role=role.id, tenant=tenant.id) - - self.admin_clients( - "keystone").roles.add_user_role.assert_called_once_with(user.id, - role.id, - tenant.id) - self._test_atomic_action_timer(scenario.atomic_actions(), - "keystone.add_role") - - def test_user_delete(self): - resource = fakes.FakeResource() - resource.delete = mock.MagicMock() - - scenario = utils.KeystoneScenario(self.context) - scenario._resource_delete(resource) - resource.delete.assert_called_once_with() - r = "keystone.delete_%s" % resource.__class__.__name__.lower() - self._test_atomic_action_timer(scenario.atomic_actions(), r) - - def test_role_remove(self): - user = mock.MagicMock() - role = mock.MagicMock() - tenant = mock.MagicMock() - scenario = utils.KeystoneScenario(self.context) - - scenario._role_remove(user=user, role=role, tenant=tenant) - - self.admin_clients( - "keystone").roles.remove_user_role.assert_called_once_with(user, - role, - tenant) - self._test_atomic_action_timer(scenario.atomic_actions(), - "keystone.remove_role") - - def test_tenant_create(self): - scenario = utils.KeystoneScenario(self.context) - scenario.generate_random_name = mock.Mock() - result = scenario._tenant_create() - - self.assertEqual( - self.admin_clients("keystone").tenants.create.return_value, result) - self.admin_clients("keystone").tenants.create.assert_called_once_with( - scenario.generate_random_name.return_value) - self._test_atomic_action_timer(scenario.atomic_actions(), - "keystone.create_tenant") - - @ddt.data( - {"service_type": "service_type"}, - {"service_type": None} - ) - def test_service_create(self, service_type): - scenario = utils.KeystoneScenario(self.context) - scenario.generate_random_name = mock.Mock() - - result = scenario._service_create( - service_type=service_type, description="description") - - self.assertEqual( - self.admin_clients("keystone").services.create.return_value, - result) - if service_type == "service_type": - self.admin_clients( - "keystone").services.create.assert_called_once_with( - scenario.generate_random_name.return_value, - service_type, description="description") - elif service_type is None: - self.admin_clients( - "keystone").services.create.assert_called_once_with( - scenario.generate_random_name.return_value, - "rally_test_type", description="description") - self._test_atomic_action_timer(scenario.atomic_actions(), - "keystone.create_service") - - def test_tenant_create_with_users(self): - tenant = mock.MagicMock() - scenario = utils.KeystoneScenario(self.context) - scenario.generate_random_name = mock.Mock(return_value="foobarov") - - scenario._users_create(tenant, users_per_tenant=1) - - self.admin_clients("keystone").users.create.assert_called_once_with( - "foobarov", password="foobarov", email="foobarov@rally.me", - tenant_id=tenant.id) - self._test_atomic_action_timer(scenario.atomic_actions(), - "keystone.create_users") - - def test_list_users(self): - scenario = utils.KeystoneScenario(self.context) - scenario._list_users() - self.admin_clients("keystone").users.list.assert_called_once_with() - self._test_atomic_action_timer(scenario.atomic_actions(), - "keystone.list_users") - - def test_list_tenants(self): - scenario = utils.KeystoneScenario(self.context) - scenario._list_tenants() - self.admin_clients("keystone").tenants.list.assert_called_once_with() - self._test_atomic_action_timer(scenario.atomic_actions(), - "keystone.list_tenants") - - def test_list_services(self): - scenario = utils.KeystoneScenario(self.context) - scenario._list_services() - - 
self.admin_clients("keystone").services.list.assert_called_once_with() - self._test_atomic_action_timer(scenario.atomic_actions(), - "keystone.service_list") - - def test_delete_service(self): - service = mock.MagicMock() - scenario = utils.KeystoneScenario(self.context) - scenario._delete_service(service_id=service.id) - - self.admin_clients("keystone").services.delete.assert_called_once_with( - service.id) - self._test_atomic_action_timer(scenario.atomic_actions(), - "keystone.delete_service") - - def test_get_tenant(self): - tenant = mock.MagicMock() - scenario = utils.KeystoneScenario(self.context) - scenario._get_tenant(tenant_id=tenant.id) - - self.admin_clients("keystone").tenants.get.assert_called_once_with( - tenant.id) - self._test_atomic_action_timer(scenario.atomic_actions(), - "keystone.get_tenant") - - def test_get_user(self): - user = mock.MagicMock() - scenario = utils.KeystoneScenario(self.context) - scenario._get_user(user_id=user.id) - - self.admin_clients("keystone").users.get.assert_called_once_with( - user.id) - self._test_atomic_action_timer(scenario.atomic_actions(), - "keystone.get_user") - - def test_get_role(self): - role = mock.MagicMock() - scenario = utils.KeystoneScenario(self.context) - scenario._get_role(role_id=role.id) - - self.admin_clients("keystone").roles.get.assert_called_once_with( - role.id) - self._test_atomic_action_timer(scenario.atomic_actions(), - "keystone.get_role") - - def test_get_service(self): - service = mock.MagicMock() - scenario = utils.KeystoneScenario(self.context) - scenario._get_service(service_id=service.id) - - self.admin_clients("keystone").services.get.assert_called_once_with( - service.id) - self._test_atomic_action_timer(scenario.atomic_actions(), - "keystone.get_service") - - def test_update_tenant(self): - tenant = mock.MagicMock() - description = "new description" - - scenario = utils.KeystoneScenario(self.context) - scenario.generate_random_name = mock.Mock() - scenario._update_tenant(tenant=tenant, description=description) - - self.admin_clients("keystone").tenants.update.assert_called_once_with( - tenant.id, scenario.generate_random_name.return_value, - description) - self._test_atomic_action_timer(scenario.atomic_actions(), - "keystone.update_tenant") - - def test_update_user_password(self): - password = "pswd" - user = mock.MagicMock() - scenario = utils.KeystoneScenario(self.context) - - scenario._update_user_password(password=password, user_id=user.id) - - self.admin_clients( - "keystone").users.update_password.assert_called_once_with(user.id, - password) - self._test_atomic_action_timer(scenario.atomic_actions(), - "keystone.update_user_password") - - @mock.patch("rally.plugins.openstack.scenario.OpenStackScenario." 
- "admin_clients") - def test_update_user_password_v3(self, - mock_open_stack_scenario_admin_clients): - password = "pswd" - user = mock.MagicMock() - scenario = utils.KeystoneScenario() - - type(mock_open_stack_scenario_admin_clients.return_value).version = ( - mock.PropertyMock(return_value="v3")) - scenario._update_user_password(password=password, user_id=user.id) - - mock_open_stack_scenario_admin_clients( - "keystone").users.update.assert_called_once_with( - user.id, password=password) - self._test_atomic_action_timer(scenario.atomic_actions(), - "keystone.update_user_password") - - def test_get_service_by_name(self): - scenario = utils.KeystoneScenario(self.context) - svc_foo, svc_bar = mock.Mock(), mock.Mock() - scenario._list_services = mock.Mock(return_value=[svc_foo, svc_bar]) - self.assertEqual(scenario._get_service_by_name(svc_bar.name), svc_bar) - self.assertIsNone(scenario._get_service_by_name("spam")) - - @mock.patch(UTILS + "KeystoneScenario.clients") - def test_create_ec2credentials(self, mock_clients): - scenario = utils.KeystoneScenario(self.context) - creds = mock.Mock() - mock_clients("keystone").ec2.create.return_value = creds - create_creds = scenario._create_ec2credentials("user_id", - "tenant_id") - self.assertEqual(create_creds, creds) - mock_clients("keystone").ec2.create.assert_called_once_with( - "user_id", "tenant_id") - self._test_atomic_action_timer(scenario.atomic_actions(), - "keystone.create_ec2creds") - - @mock.patch(UTILS + "KeystoneScenario.clients") - def test_list_ec2credentials(self, mock_clients): - scenario = utils.KeystoneScenario(self.context) - creds_list = mock.Mock() - mock_clients("keystone").ec2.list.return_value = creds_list - list_creds = scenario._list_ec2credentials("user_id") - self.assertEqual(list_creds, creds_list) - mock_clients("keystone").ec2.list.assert_called_once_with("user_id") - self._test_atomic_action_timer(scenario.atomic_actions(), - "keystone.list_ec2creds") - - @mock.patch(UTILS + "KeystoneScenario.clients") - def test_delete_ec2credentials(self, mock_clients): - scenario = utils.KeystoneScenario(self.context) - mock_clients("keystone").ec2.delete = mock.MagicMock() - scenario._delete_ec2credential("user_id", "access") - mock_clients("keystone").ec2.delete.assert_called_once_with("user_id", - "access") - self._test_atomic_action_timer(scenario.atomic_actions(), - "keystone.delete_ec2creds") diff --git a/tests/unit/plugins/openstack/scenarios/magnum/__init__.py b/tests/unit/plugins/openstack/scenarios/magnum/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/scenarios/magnum/test_cluster_templates.py b/tests/unit/plugins/openstack/scenarios/magnum/test_cluster_templates.py deleted file mode 100644 index b837c0c3a8..0000000000 --- a/tests/unit/plugins/openstack/scenarios/magnum/test_cluster_templates.py +++ /dev/null @@ -1,35 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
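The KeystoneScenario utils tests removed above lean on @mock.patch as a decorator: the patched callable (uuid.uuid4, wrappers.keystone.wrap, and so on) is swapped out for the duration of the test and handed to the test method as an extra argument. A self-contained sketch of that technique follows, with create_user as a hypothetical stand-in for _user_create.

import unittest
import uuid
from unittest import mock


def create_user(client, name):
    """Stand-in for _user_create: random password, fixed mail domain."""
    return client.users.create(name, password=str(uuid.uuid4()),
                               email="%s@rally.me" % name)


class CreateUserTestCase(unittest.TestCase):

    @mock.patch("uuid.uuid4", return_value="pwd")
    def test_create_user(self, mock_uuid4):
        # The decorator patches uuid.uuid4 for this test only and injects the
        # mock as the extra "mock_uuid4" argument.
        client = mock.MagicMock()

        result = create_user(client, "foobarov")

        self.assertEqual(client.users.create.return_value, result)
        client.users.create.assert_called_once_with(
            "foobarov", password="pwd", email="foobarov@rally.me")
        mock_uuid4.assert_called_once_with()


if __name__ == "__main__":
    unittest.main()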
- -import ddt -import mock - -from rally.plugins.openstack.scenarios.magnum import cluster_templates -from tests.unit import test - - -@ddt.ddt -class MagnumClusterTemplatesTestCase(test.TestCase): - - @ddt.data( - {"kwargs": {}}, - {"kwargs": {"fakearg": "f"}}) - @ddt.unpack - def test_list_cluster_templates(self, kwargs): - scenario = cluster_templates.ListClusterTemplates() - scenario._list_cluster_templates = mock.Mock() - - scenario.run(**kwargs) - - scenario._list_cluster_templates.assert_called_once_with(**kwargs) diff --git a/tests/unit/plugins/openstack/scenarios/magnum/test_clusters.py b/tests/unit/plugins/openstack/scenarios/magnum/test_clusters.py deleted file mode 100644 index 62f380c424..0000000000 --- a/tests/unit/plugins/openstack/scenarios/magnum/test_clusters.py +++ /dev/null @@ -1,124 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import mock - -from rally import exceptions -from rally.plugins.openstack.scenarios.magnum import clusters -from tests.unit import test - - -@ddt.ddt -class MagnumClustersTestCase(test.ScenarioTestCase): - - @staticmethod - def _get_context(): - context = test.get_test_context() - context.update({ - "tenant": { - "id": "rally_tenant_id" - }, - "user": {"id": "fake_user_id", - "credential": mock.MagicMock()}, - "config": {} - }) - return context - - @ddt.data( - {"kwargs": {}}, - {"kwargs": {"fakearg": "f"}}) - def test_list_clusters(self, kwargs): - scenario = clusters.ListClusters() - scenario._list_clusters = mock.Mock() - - scenario.run(**kwargs) - - scenario._list_clusters.assert_called_once_with(**kwargs) - - def test_create_cluster_with_existing_ct_and_list_clusters(self): - context = self._get_context() - scenario = clusters.CreateAndListClusters(context) - kwargs = {"fakearg": "f"} - fake_cluster1 = mock.Mock(uuid="a") - fake_cluster2 = mock.Mock(uuid="b") - fake_cluster3 = mock.Mock(uuid="c") - scenario._create_cluster = mock.Mock(return_value=fake_cluster1) - scenario._list_clusters = mock.Mock(return_value=[fake_cluster1, - fake_cluster2, - fake_cluster3]) - - run_kwargs = kwargs.copy() - run_kwargs["cluster_template_uuid"] = "existing_cluster_template_uuid" - # Positive case - scenario.run(2, **run_kwargs) - - scenario._create_cluster.assert_called_once_with( - "existing_cluster_template_uuid", 2, keypair=mock.ANY, **kwargs) - scenario._list_clusters.assert_called_once_with(**kwargs) - - # Negative case1: cluster isn't created - scenario._create_cluster.return_value = None - self.assertRaises(exceptions.RallyAssertionError, - scenario.run, 2, **run_kwargs) - scenario._create_cluster.assert_called_with( - "existing_cluster_template_uuid", 2, keypair=mock.ANY, **kwargs) - - # Negative case2: created cluster not in the list of available clusters - scenario._create_cluster.return_value = mock.Mock(uuid="foo") - self.assertRaises(exceptions.RallyAssertionError, - scenario.run, 2, **run_kwargs) - scenario._create_cluster.assert_called_with( - "existing_cluster_template_uuid", 2, 
keypair=mock.ANY, **kwargs) - scenario._list_clusters.assert_called_with(**kwargs) - - def test_create_and_list_clusters(self): - context = self._get_context() - context.update({ - "tenant": { - "cluster_template": "rally_cluster_template_uuid" - } - }) - - scenario = clusters.CreateAndListClusters(context) - fake_cluster1 = mock.Mock(uuid="a") - fake_cluster2 = mock.Mock(uuid="b") - fake_cluster3 = mock.Mock(uuid="c") - kwargs = {"fakearg": "f"} - scenario._create_cluster = mock.Mock(return_value=fake_cluster1) - scenario._list_clusters = mock.Mock(return_value=[fake_cluster1, - fake_cluster2, - fake_cluster3]) - - # Positive case - scenario.run(2, **kwargs) - - scenario._create_cluster.assert_called_once_with( - "rally_cluster_template_uuid", 2, keypair=mock.ANY, **kwargs) - scenario._list_clusters.assert_called_once_with(**kwargs) - - # Negative case1: cluster isn't created - scenario._create_cluster.return_value = None - self.assertRaises(exceptions.RallyAssertionError, - scenario.run, 2, **kwargs) - scenario._create_cluster.assert_called_with( - "rally_cluster_template_uuid", 2, keypair=mock.ANY, **kwargs) - - # Negative case2: created cluster not in the list of available clusters - scenario._create_cluster.return_value = mock.Mock(uuid="foo") - self.assertRaises(exceptions.RallyAssertionError, - scenario.run, 2, **kwargs) - scenario._create_cluster.assert_called_with( - "rally_cluster_template_uuid", 2, keypair=mock.ANY, **kwargs) - scenario._list_clusters.assert_called_with(**kwargs) diff --git a/tests/unit/plugins/openstack/scenarios/magnum/test_k8s_pods.py b/tests/unit/plugins/openstack/scenarios/magnum/test_k8s_pods.py deleted file mode 100644 index 236557efab..0000000000 --- a/tests/unit/plugins/openstack/scenarios/magnum/test_k8s_pods.py +++ /dev/null @@ -1,104 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import mock - -from rally import exceptions -from rally.plugins.openstack.scenarios.magnum import k8s_pods -from tests.unit import test - - -@ddt.ddt -class K8sPodsTestCase(test.ScenarioTestCase): - - def test_list_pods(self): - scenario = k8s_pods.ListPods() - scenario._list_v1pods = mock.Mock() - - scenario.run() - - scenario._list_v1pods.assert_called_once_with() - - @ddt.data(["manifest.json"], ["manifest.yaml"]) - def test_create_pods(self, manifests): - manifest = manifests[0] - scenario = k8s_pods.CreatePods() - file_content = "data: fake_content" - if manifest == "manifest.json": - file_content = "{\"data\": \"fake_content\"}" - file_mock = mock.mock_open(read_data=file_content) - fake_pod = mock.Mock() - scenario._create_v1pod = mock.MagicMock(return_value=fake_pod) - - with mock.patch( - "rally.plugins.openstack.scenarios.magnum.k8s_pods.open", - file_mock, create=True) as m: - scenario.run(manifests) - - m.assert_called_once_with(manifest, "r") - m.return_value.read.assert_called_once_with() - scenario._create_v1pod.assert_called_once_with( - {"data": "fake_content"}) - - # test error cases: - # 1. 
pod not created - scenario._create_v1pod = mock.MagicMock(return_value=None) - - with mock.patch( - "rally.plugins.openstack.scenarios.magnum.k8s_pods.open", - file_mock, create=True) as m: - self.assertRaises( - exceptions.RallyAssertionError, - scenario.run, manifests) - - m.assert_called_with(manifest, "r") - m.return_value.read.assert_called_with() - scenario._create_v1pod.assert_called_with( - {"data": "fake_content"}) - - @ddt.data(["manifest.json"], ["manifest.yaml"]) - def test_create_rcs(self, manifests): - manifest = manifests[0] - scenario = k8s_pods.CreateRcs() - file_content = "data: fake_content" - if manifest == "manifest.json": - file_content = "{\"data\": \"fake_content\"}" - file_mock = mock.mock_open(read_data=file_content) - fake_rc = mock.Mock() - scenario._create_v1rc = mock.MagicMock(return_value=fake_rc) - - with mock.patch( - "rally.plugins.openstack.scenarios.magnum.k8s_pods.open", - file_mock, create=True) as m: - scenario.run(manifests) - - m.assert_called_once_with(manifest, "r") - m.return_value.read.assert_called_once_with() - scenario._create_v1rc.assert_called_once_with({"data": "fake_content"}) - - # test error cases: - # 1. rc not created - scenario._create_v1rc = mock.MagicMock(return_value=None) - - with mock.patch( - "rally.plugins.openstack.scenarios.magnum.k8s_pods.open", - file_mock, create=True) as m: - self.assertRaises( - exceptions.RallyAssertionError, - scenario.run, manifests) - - m.assert_called_with(manifest, "r") - m.return_value.read.assert_called_with() - scenario._create_v1rc.assert_called_with({"data": "fake_content"}) diff --git a/tests/unit/plugins/openstack/scenarios/magnum/test_utils.py b/tests/unit/plugins/openstack/scenarios/magnum/test_utils.py deleted file mode 100644 index 86fbc943ab..0000000000 --- a/tests/unit/plugins/openstack/scenarios/magnum/test_utils.py +++ /dev/null @@ -1,385 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
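The k8s pod and replication-controller tests removed above read manifests through the module-level open(), which the tests replace with mock.mock_open(read_data=...) so no file has to exist on disk. The sketch below shows the same technique against a hypothetical load_manifest helper; the patch target is this sketch's own module rather than the Rally k8s_pods scenario module.

import json
import unittest
from unittest import mock


def load_manifest(path):
    """Stand-in for the scenario's manifest handling: read and parse a JSON file."""
    with open(path, "r") as f:
        return json.loads(f.read())


class LoadManifestTestCase(unittest.TestCase):

    def test_load_manifest(self):
        file_mock = mock.mock_open(read_data='{"data": "fake_content"}')

        # Patch "open" in the module where load_manifest looks it up; the real
        # tests patched the magnum k8s_pods scenario module instead.
        with mock.patch("%s.open" % __name__, file_mock, create=True) as m:
            manifest = load_manifest("manifest.json")

        m.assert_called_once_with("manifest.json", "r")
        m.return_value.read.assert_called_once_with()
        self.assertEqual({"data": "fake_content"}, manifest)


if __name__ == "__main__":
    unittest.main()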
- -import os - -import mock - -from kubernetes import client as kubernetes_client -from kubernetes.client import api_client -from kubernetes.client.rest import ApiException -from rally import exceptions -from rally.plugins.openstack.scenarios.magnum import utils -from tests.unit import test - -MAGNUM_UTILS = "rally.plugins.openstack.scenarios.magnum.utils" - -CONF = utils.CONF - - -class MagnumScenarioTestCase(test.ScenarioTestCase): - def setUp(self): - super(MagnumScenarioTestCase, self).setUp() - self.cluster_template = mock.Mock() - self.cluster = mock.Mock() - self.pod = mock.Mock() - self.scenario = utils.MagnumScenario(self.context) - - def test_list_cluster_templates(self): - fake_list = [self.cluster_template] - - self.clients("magnum").cluster_templates.list.return_value = fake_list - return_ct_list = self.scenario._list_cluster_templates() - self.assertEqual(fake_list, return_ct_list) - - self.clients("magnum").cluster_templates.list.assert_called_once_with() - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "magnum.list_cluster_templates") - - def test_create_cluster_template(self): - self.scenario.generate_random_name = mock.Mock( - return_value="generated_name") - fake_ct = self.cluster_template - self.clients("magnum").cluster_templates.create.return_value = fake_ct - - return_cluster_template = self.scenario._create_cluster_template( - image="test_image", - keypair="test_key", - external_network="public", - dns_nameserver="8.8.8.8", - flavor="m1.large", - docker_volume_size=50, - network_driver="docker", - coe="swarm") - - self.assertEqual(fake_ct, return_cluster_template) - _, kwargs = self.clients("magnum").cluster_templates.create.call_args - self.assertEqual("generated_name", kwargs["name"]) - - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "magnum.create_cluster_template") - - def test_get_cluster_template(self): - client = self.clients("magnum") - client.cluster_templates.get.return_value = self.cluster_template - return_cluster_template = self.scenario._get_cluster_template("uuid") - client.cluster_templates.get.assert_called_once_with("uuid") - self.assertEqual(self.cluster_template, return_cluster_template) - self._test_atomic_action_timer( - self.scenario.atomic_actions(), "magnum.get_cluster_template") - - def test_list_clusters(self): - return_clusters_list = self.scenario._list_clusters(limit="foo1") - client = self.clients("magnum") - client.clusters.list.assert_called_once_with(limit="foo1") - self.assertEqual(client.clusters.list.return_value, - return_clusters_list) - self._test_atomic_action_timer( - self.scenario.atomic_actions(), "magnum.list_clusters") - - def test_create_cluster(self): - self.scenario.generate_random_name = mock.Mock( - return_value="generated_name") - self.clients("magnum").clusters.create.return_value = self.cluster - return_cluster = self.scenario._create_cluster( - cluster_template="generated_uuid", node_count=2) - self.mock_wait_for_status.mock.assert_called_once_with( - self.cluster, - ready_statuses=["CREATE_COMPLETE"], - update_resource=self.mock_get_from_manager.mock.return_value, - check_interval=CONF.openstack. 
- magnum_cluster_create_poll_interval, - timeout=CONF.openstack.magnum_cluster_create_timeout, - id_attr="uuid") - _, kwargs = self.clients("magnum").clusters.create.call_args - self.assertEqual("generated_name", kwargs["name"]) - self.assertEqual("generated_uuid", kwargs["cluster_template_id"]) - self.mock_get_from_manager.mock.assert_called_once_with() - self.assertEqual( - self.mock_wait_for_status.mock.return_value, return_cluster) - self._test_atomic_action_timer( - self.scenario.atomic_actions(), "magnum.create_cluster") - - def test_get_cluster(self): - self.clients("magnum").clusters.get.return_value = self.cluster - return_cluster = self.scenario._get_cluster("uuid") - self.clients("magnum").clusters.get.assert_called_once_with("uuid") - self.assertEqual(self.cluster, return_cluster) - self._test_atomic_action_timer( - self.scenario.atomic_actions(), "magnum.get_cluster") - - def test_get_ca_certificate(self): - self.scenario._get_ca_certificate(self.cluster.uuid) - self.clients("magnum").certificates.get.assert_called_once_with( - self.cluster.uuid) - self._test_atomic_action_timer( - self.scenario.atomic_actions(), "magnum.get_ca_certificate") - - def test_create_ca_certificate(self): - csr_req = {"cluster_uuid": "uuid", "csr": "csr file"} - self.scenario._create_ca_certificate(csr_req) - self.clients("magnum").certificates.create.assert_called_once_with( - **csr_req) - self._test_atomic_action_timer( - self.scenario.atomic_actions(), "magnum.create_ca_certificate") - - @mock.patch("kubernetes.client.api_client.ApiClient") - @mock.patch("kubernetes.client.apis.core_v1_api.CoreV1Api") - def test_get_k8s_api_client_using_tls(self, mock_core_v1_api, - mock_api_client): - - if hasattr(kubernetes_client, "ConfigurationObject"): - # it is k8s-client < 4.0.0 - m = mock.patch("kubernetes.client.ConfigurationObject") - else: - m = mock.patch("kubernetes.client.Configuration") - - mock_configuration_object = m.start() - self.addCleanup(m.stop) - - self.context.update({ - "ca_certs_directory": "/home/stack", - "tenant": { - "id": "rally_tenant_id", - "cluster": "rally_cluster_uuid" - } - }) - self.scenario = utils.MagnumScenario(self.context) - cluster_uuid = self.context["tenant"]["cluster"] - client = self.clients("magnum") - client.clusters.get.return_value = self.cluster - cluster = self.scenario._get_cluster(cluster_uuid) - self.cluster_template.tls_disabled = False - client.cluster_templates.get.return_value = self.cluster_template - dir = self.context["ca_certs_directory"] - key_file = os.path.join(dir, cluster_uuid.__add__(".key")) - cert_file = os.path.join(dir, cluster_uuid.__add__(".crt")) - ca_certs = os.path.join(dir, cluster_uuid.__add__("_ca.crt")) - config = mock_configuration_object.return_value - config.host = cluster.api_address - config.ssl_ca_cert = ca_certs - config.cert_file = cert_file - config.key_file = key_file - _api_client = mock_api_client.return_value - self.scenario._get_k8s_api_client() - mock_configuration_object.assert_called_once_with() - mock_api_client.assert_called_once_with(config=config) - mock_core_v1_api.assert_called_once_with(_api_client) - - @mock.patch("kubernetes.client.api_client.ApiClient") - @mock.patch("kubernetes.client.apis.core_v1_api.CoreV1Api") - def test_get_k8s_api_client(self, mock_core_v1_api, mock_api_client): - - if hasattr(kubernetes_client, "ConfigurationObject"): - # it is k8s-client < 4.0.0 - m = mock.patch("kubernetes.client.ConfigurationObject") - else: - m = mock.patch("kubernetes.client.Configuration") - - 
mock_configuration_object = m.start() - self.addCleanup(m.stop) - - self.context.update({ - "tenant": { - "id": "rally_tenant_id", - "cluster": "rally_cluster_uuid" - } - }) - self.scenario = utils.MagnumScenario(self.context) - cluster_uuid = self.context["tenant"]["cluster"] - client = self.clients("magnum") - client.clusters.get.return_value = self.cluster - cluster = self.scenario._get_cluster(cluster_uuid) - self.cluster_template.tls_disabled = True - client.cluster_templates.get.return_value = self.cluster_template - config = mock_configuration_object.return_value - config.host = cluster.api_address - config.ssl_ca_cert = None - config.cert_file = None - config.key_file = None - _api_client = mock_api_client.return_value - self.scenario._get_k8s_api_client() - mock_configuration_object.assert_called_once_with() - mock_api_client.assert_called_once_with(config=config) - mock_core_v1_api.assert_called_once_with(_api_client) - - @mock.patch(MAGNUM_UTILS + ".MagnumScenario._get_k8s_api_client") - def test_list_v1pods(self, mock__get_k8s_api_client): - k8s_api = mock__get_k8s_api_client.return_value - self.scenario._list_v1pods() - k8s_api.list_node.assert_called_once_with( - namespace="default") - self._test_atomic_action_timer( - self.scenario.atomic_actions(), "magnum.k8s_list_v1pods") - - @mock.patch("random.choice") - @mock.patch(MAGNUM_UTILS + ".MagnumScenario._get_k8s_api_client") - def test_create_v1pod(self, mock__get_k8s_api_client, - mock_random_choice): - k8s_api = mock__get_k8s_api_client.return_value - manifest = ( - {"apiVersion": "v1", "kind": "Pod", - "metadata": {"name": "nginx"}}) - podname = manifest["metadata"]["name"] + "-" - for i in range(5): - podname = podname + mock_random_choice.return_value - k8s_api.create_namespaced_pod = mock.MagicMock( - side_effect=[ApiException(status=403), self.pod]) - not_ready_pod = api_client.models.V1Pod() - not_ready_status = api_client.models.V1PodStatus() - not_ready_status.phase = "not_ready" - not_ready_pod.status = not_ready_status - almost_ready_pod = api_client.models.V1Pod() - almost_ready_status = api_client.models.V1PodStatus() - almost_ready_status.phase = "almost_ready" - almost_ready_pod.status = almost_ready_status - ready_pod = api_client.models.V1Pod() - ready_condition = api_client.models.V1PodCondition(status="True", - type="Ready") - ready_status = api_client.models.V1PodStatus() - ready_status.phase = "Running" - ready_status.conditions = [ready_condition] - ready_pod_metadata = api_client.models.V1ObjectMeta() - ready_pod_metadata.uid = "123456789" - ready_pod_spec = api_client.models.V1PodSpec( - node_name="host_abc", - containers=[] - ) - ready_pod.status = ready_status - ready_pod.metadata = ready_pod_metadata - ready_pod.spec = ready_pod_spec - k8s_api.read_namespaced_pod = mock.MagicMock( - side_effect=[not_ready_pod, almost_ready_pod, ready_pod]) - self.scenario._create_v1pod(manifest) - k8s_api.create_namespaced_pod.assert_called_with( - body=manifest, namespace="default") - k8s_api.read_namespaced_pod.assert_called_with( - name=podname, namespace="default") - self._test_atomic_action_timer( - self.scenario.atomic_actions(), "magnum.k8s_create_v1pod") - - @mock.patch("time.time") - @mock.patch("random.choice") - @mock.patch(MAGNUM_UTILS + ".MagnumScenario._get_k8s_api_client") - def test_create_v1pod_timeout(self, mock__get_k8s_api_client, - mock_random_choice, mock_time): - k8s_api = mock__get_k8s_api_client.return_value - manifest = ( - {"apiVersion": "v1", "kind": "Pod", - "metadata": {"name": 
"nginx"}}) - k8s_api.create_namespaced_pod.return_value = self.pod - mock_time.side_effect = [1, 2, 3, 4, 5, 1800, 1801] - not_ready_pod = api_client.models.V1Pod() - not_ready_status = api_client.models.V1PodStatus() - not_ready_status.phase = "not_ready" - not_ready_pod_metadata = api_client.models.V1ObjectMeta() - not_ready_pod_metadata.uid = "123456789" - not_ready_pod.status = not_ready_status - not_ready_pod.metadata = not_ready_pod_metadata - k8s_api.read_namespaced_pod = mock.MagicMock( - side_effect=[not_ready_pod - for i in range(4)]) - - self.assertRaises( - exceptions.TimeoutException, - self.scenario._create_v1pod, manifest) - - @mock.patch(MAGNUM_UTILS + ".MagnumScenario._get_k8s_api_client") - def test_list_v1rcs(self, mock__get_k8s_api_client): - k8s_api = mock__get_k8s_api_client.return_value - self.scenario._list_v1rcs() - (k8s_api.list_namespaced_replication_controller - .assert_called_once_with(namespace="default")) - self._test_atomic_action_timer( - self.scenario.atomic_actions(), "magnum.k8s_list_v1rcs") - - @mock.patch("random.choice") - @mock.patch(MAGNUM_UTILS + ".MagnumScenario._get_k8s_api_client") - def test_create_v1rc(self, mock__get_k8s_api_client, - mock_random_choice): - k8s_api = mock__get_k8s_api_client.return_value - manifest = ( - {"apiVersion": "v1", - "kind": "ReplicationController", - "metadata": {"name": "nginx-controller"}, - "spec": {"replicas": 2, - "selector": {"name": "nginx"}, - "template": {"metadata": - {"labels": - {"name": "nginx"}}}}}) - suffix = "-" - for i in range(5): - suffix = suffix + mock_random_choice.return_value - rcname = manifest["metadata"]["name"] + suffix - rc = api_client.models.V1ReplicationController() - rc.spec = api_client.models.V1ReplicationControllerSpec() - rc.spec.replicas = manifest["spec"]["replicas"] - k8s_api.create_namespaced_replication_controller.return_value = rc - not_ready_rc = api_client.models.V1ReplicationController() - not_ready_rc_status = ( - api_client.models.V1ReplicationControllerStatus(replicas=0)) - not_ready_rc.status = not_ready_rc_status - ready_rc = api_client.models.V1ReplicationController() - ready_rc_status = api_client.models.V1ReplicationControllerStatus( - replicas=manifest["spec"]["replicas"] - ) - ready_rc_metadata = api_client.models.V1ObjectMeta() - ready_rc_metadata.uid = "123456789" - ready_rc_metadata.name = rcname - ready_rc.status = ready_rc_status - ready_rc.metadata = ready_rc_metadata - k8s_api.read_namespaced_replication_controller = mock.MagicMock( - side_effect=[not_ready_rc, ready_rc]) - self.scenario._create_v1rc(manifest) - (k8s_api.create_namespaced_replication_controller - .assert_called_once_with(body=manifest, namespace="default")) - (k8s_api.read_namespaced_replication_controller - .assert_called_with(name=rcname, namespace="default")) - self._test_atomic_action_timer( - self.scenario.atomic_actions(), "magnum.k8s_create_v1rc") - - @mock.patch("time.time") - @mock.patch("random.choice") - @mock.patch(MAGNUM_UTILS + ".MagnumScenario._get_k8s_api_client") - def test_create_v1rc_timeout(self, mock__get_k8s_api_client, - mock_random_choice, mock_time): - k8s_api = mock__get_k8s_api_client.return_value - manifest = ( - {"apiVersion": "v1", - "kind": "ReplicationController", - "metadata": {"name": "nginx-controller"}, - "spec": {"replicas": 2, - "selector": {"app": "nginx"}, - "template": {"metadata": - {"labels": - {"name": "nginx"}}}}}) - rc = api_client.models.V1ReplicationController() - rc.spec = api_client.models.V1ReplicationControllerSpec() - rc.spec.replicas 
= manifest["spec"]["replicas"] - mock_time.side_effect = [1, 2, 3, 4, 5, 1800, 1801] - k8s_api.create_namespaced_replication_controller.return_value = rc - not_ready_rc = api_client.models.V1ReplicationController() - not_ready_rc_status = ( - api_client.models.V1ReplicationControllerStatus(replicas=0)) - not_ready_rc_metadata = api_client.models.V1ObjectMeta() - not_ready_rc_metadata.uid = "123456789" - not_ready_rc.status = not_ready_rc_status - not_ready_rc.metadata = not_ready_rc_metadata - k8s_api.read_namespaced_replication_controller = mock.MagicMock( - side_effect=[not_ready_rc - for i in range(4)]) - - self.assertRaises( - exceptions.TimeoutException, - self.scenario._create_v1rc, manifest) diff --git a/tests/unit/plugins/openstack/scenarios/manila/__init__.py b/tests/unit/plugins/openstack/scenarios/manila/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/scenarios/manila/test_shares.py b/tests/unit/plugins/openstack/scenarios/manila/test_shares.py deleted file mode 100644 index a2d32cc55d..0000000000 --- a/tests/unit/plugins/openstack/scenarios/manila/test_shares.py +++ /dev/null @@ -1,421 +0,0 @@ -# Copyright 2015 Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
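The magnum utils tests removed above simulate polling loops by giving the mocked client a side_effect sequence: each successive read returns another "not ready" object until a ready one appears, and patching time.time with its own side_effect list drives the timeout branch. A self-contained sketch of both cases, with wait_until_running as a hypothetical stand-in for the pod-polling helper:

import time
import unittest
from unittest import mock


def wait_until_running(api, name, timeout=1800, interval=0):
    """Stand-in for the pod-polling helper: poll until the phase is 'Running'."""
    start = time.time()
    while time.time() - start < timeout:
        pod = api.read_pod(name=name)
        if pod.status.phase == "Running":
            return pod
        time.sleep(interval)
    raise TimeoutError("pod %s never became ready" % name)


class WaitUntilRunningTestCase(unittest.TestCase):

    def test_eventually_ready(self):
        api = mock.MagicMock()
        pending = mock.Mock(status=mock.Mock(phase="Pending"))
        running = mock.Mock(status=mock.Mock(phase="Running"))
        # Each read_pod() call pops the next value off the side_effect list.
        api.read_pod.side_effect = [pending, pending, running]

        result = wait_until_running(api, "nginx")

        self.assertIs(running, result)
        self.assertEqual(3, api.read_pod.call_count)

    @mock.patch("time.time")
    def test_timeout(self, mock_time):
        api = mock.MagicMock()
        api.read_pod.return_value = mock.Mock(status=mock.Mock(phase="Pending"))
        # First value is the start timestamp, the later ones jump past the deadline.
        mock_time.side_effect = [1, 2, 1801, 1802]

        self.assertRaises(TimeoutError, wait_until_running, api, "nginx")


if __name__ == "__main__":
    unittest.main()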
- -import ddt -import mock - -from rally.plugins.openstack.scenarios.manila import shares -from tests.unit import test - - -@ddt.ddt -class ManilaSharesTestCase(test.ScenarioTestCase): - - @ddt.data( - {"share_proto": "nfs", "size": 3}, - {"share_proto": "cifs", "size": 4, - "share_network": "foo", "share_type": "bar"}, - ) - def test_create_and_delete_share(self, params): - fake_share = mock.MagicMock() - scenario = shares.CreateAndDeleteShare(self.context) - scenario._create_share = mock.MagicMock(return_value=fake_share) - scenario.sleep_between = mock.MagicMock() - scenario._delete_share = mock.MagicMock() - - scenario.run(min_sleep=3, max_sleep=4, **params) - - scenario._create_share.assert_called_once_with(**params) - scenario.sleep_between.assert_called_once_with(3, 4) - scenario._delete_share.assert_called_once_with(fake_share) - - @ddt.data( - {}, - {"detailed": True}, - {"detailed": False}, - {"search_opts": None}, - {"search_opts": {}}, - {"search_opts": {"foo": "bar"}}, - {"detailed": True, "search_opts": None}, - {"detailed": False, "search_opts": None}, - {"detailed": True, "search_opts": {"foo": "bar"}}, - {"detailed": False, "search_opts": {"quuz": "foo"}}, - ) - @ddt.unpack - def test_list_shares(self, detailed=True, search_opts=None): - scenario = shares.ListShares(self.context) - scenario._list_shares = mock.MagicMock() - - scenario.run(detailed=detailed, search_opts=search_opts) - - scenario._list_shares.assert_called_once_with( - detailed=detailed, search_opts=search_opts) - - @ddt.data( - {"params": {"share_proto": "nfs"}, "new_size": 4}, - { - "params": { - "share_proto": "cifs", - "size": 4, - "share_network": "foo", - "share_type": "bar", - "snapshot_id": "snapshot_foo", - "description": "foo_description", - "metadata": {"foo_metadata": "foo"}, - "share_network": "foo_network", - "share_type": "foo_type", - "is_public": True, - "availability_zone": "foo_avz", - "share_group_id": "foo_group_id" - }, - "new_size": 8 - } - ) - @ddt.unpack - def test_create_and_extend_shares(self, params, new_size): - size = params.get("size", 1) - share_group_id = params.get("share_group_id", None) - snapshot_id = params.get("snapshot_id", None) - description = params.get("description", None) - metadata = params.get("metadata", None) - share_network = params.get("share_network", None) - share_type = params.get("share_type", None) - is_public = params.get("is_public", False) - availability_zone = params.get("availability_zone", None) - - fake_share = mock.MagicMock() - scenario = shares.CreateAndExtendShare(self.context) - scenario._create_share = mock.MagicMock(return_value=fake_share) - scenario._extend_share = mock.MagicMock() - - scenario.run(new_size=new_size, **params) - - scenario._create_share.assert_called_with( - share_proto=params["share_proto"], - size=size, - snapshot_id=snapshot_id, - description=description, - metadata=metadata, - share_network=share_network, - share_type=share_type, - is_public=is_public, - availability_zone=availability_zone, - share_group_id=share_group_id - ) - scenario._extend_share.assert_called_with(fake_share, new_size) - - @ddt.data( - {"params": {"share_proto": "nfs"}, "new_size": 4}, - { - "params": { - "share_proto": "cifs", - "size": 4, - "share_network": "foo", - "share_type": "bar", - "snapshot_id": "snapshot_foo", - "description": "foo_description", - "metadata": {"foo_metadata": "foo"}, - "share_network": "foo_network", - "share_type": "foo_type", - "is_public": True, - "availability_zone": "foo_avz", - "share_group_id": "foo_group_id" - 
}, - "new_size": 8 - } - ) - @ddt.unpack - def test_create_and_shrink_shares(self, params, new_size): - size = params.get("size", 2) - share_group_id = params.get("share_group_id", None) - snapshot_id = params.get("snapshot_id", None) - description = params.get("description", None) - metadata = params.get("metadata", None) - share_network = params.get("share_network", None) - share_type = params.get("share_type", None) - is_public = params.get("is_public", False) - availability_zone = params.get("availability_zone", None) - - fake_share = mock.MagicMock() - scenario = shares.CreateAndShrinkShare(self.context) - scenario._create_share = mock.MagicMock(return_value=fake_share) - scenario._shrink_share = mock.MagicMock() - - scenario.run(new_size=new_size, **params) - - scenario._create_share.assert_called_with( - share_proto=params["share_proto"], - size=size, - snapshot_id=snapshot_id, - description=description, - metadata=metadata, - share_network=share_network, - share_type=share_type, - is_public=is_public, - availability_zone=availability_zone, - share_group_id=share_group_id - ) - scenario._shrink_share.assert_called_with(fake_share, new_size) - - @ddt.data( - { - "share_proto": "nfs", - "size": 3, - "access": "127.0.0.1", - "access_type": "ip" - }, - { - "access": "1.2.3.4", - "access_type": "ip", - "access_level": "ro", - "share_proto": "cifs", - "size": 4, - "share_network": "foo", - "share_type": "bar", - "snapshot_id": "snapshot_foo", - "description": "foo_description", - "metadata": {"foo_metadata": "foo"}, - "share_network": "foo_network", - "share_type": "foo_type", - "is_public": True, - "availability_zone": "foo_avz", - "share_group_id": "foo_group_id" - } - ) - def test_create_share_and_allow_and_deny_access(self, params): - access = params["access"] - access_type = params["access_type"] - access_level = params.get("access_level", "rw") - size = params.get("size", 1) - share_group_id = params.get("share_group_id", None) - snapshot_id = params.get("snapshot_id", None) - description = params.get("description", None) - metadata = params.get("metadata", None) - share_network = params.get("share_network", None) - share_type = params.get("share_type", None) - is_public = params.get("is_public", False) - availability_zone = params.get("availability_zone", None) - fake_share = mock.MagicMock() - fake_access = {"id": "foo"} - - scenario = shares.CreateShareThenAllowAndDenyAccess(self.context) - scenario._create_share = mock.MagicMock(return_value=fake_share) - scenario._allow_access_share = mock.MagicMock(return_value=fake_access) - scenario._deny_access_share = mock.MagicMock() - - scenario.run(**params) - - scenario._create_share.assert_called_with( - share_proto=params["share_proto"], - size=size, - snapshot_id=snapshot_id, - description=description, - metadata=metadata, - share_network=share_network, - share_type=share_type, - is_public=is_public, - availability_zone=availability_zone, - share_group_id=share_group_id - ) - scenario._allow_access_share.assert_called_with( - fake_share, access_type, access, access_level) - scenario._deny_access_share.assert_called_with( - fake_share, fake_access["id"]) - - @ddt.data( - {}, - {"description": "foo_description"}, - {"neutron_net_id": "foo_neutron_net_id"}, - {"neutron_subnet_id": "foo_neutron_subnet_id"}, - {"nova_net_id": "foo_nova_net_id"}, - {"description": "foo_description", - "neutron_net_id": "foo_neutron_net_id", - "neutron_subnet_id": "foo_neutron_subnet_id", - "nova_net_id": "foo_nova_net_id"}, - ) - def 
test_create_share_network_and_delete(self, params): - fake_sn = mock.MagicMock() - scenario = shares.CreateShareNetworkAndDelete(self.context) - scenario._create_share_network = mock.MagicMock(return_value=fake_sn) - scenario._delete_share_network = mock.MagicMock() - expected_params = { - "description": None, - "neutron_net_id": None, - "neutron_subnet_id": None, - "nova_net_id": None, - } - expected_params.update(params) - - scenario.run(**params) - - scenario._create_share_network.assert_called_once_with( - **expected_params) - scenario._delete_share_network.assert_called_once_with(fake_sn) - - @ddt.data( - {}, - {"description": "foo_description"}, - {"neutron_net_id": "foo_neutron_net_id"}, - {"neutron_subnet_id": "foo_neutron_subnet_id"}, - {"nova_net_id": "foo_nova_net_id"}, - {"description": "foo_description", - "neutron_net_id": "foo_neutron_net_id", - "neutron_subnet_id": "foo_neutron_subnet_id", - "nova_net_id": "foo_nova_net_id"}, - ) - def test_create_share_network_and_list(self, params): - scenario = shares.CreateShareNetworkAndList(self.context) - fake_network = mock.Mock() - scenario._create_share_network = mock.Mock( - return_value=fake_network) - scenario._list_share_networks = mock.Mock( - return_value=[fake_network, - mock.Mock(), - mock.Mock()]) - expected_create_params = { - "description": params.get("description"), - "neutron_net_id": params.get("neutron_net_id"), - "neutron_subnet_id": params.get("neutron_subnet_id"), - "nova_net_id": params.get("nova_net_id"), - } - expected_list_params = { - "detailed": params.get("detailed", True), - "search_opts": params.get("search_opts"), - } - expected_create_params.update(params) - - scenario.run(**params) - - scenario._create_share_network.assert_called_once_with( - **expected_create_params) - scenario._list_share_networks.assert_called_once_with( - **expected_list_params) - - @ddt.data( - {}, - {"search_opts": None}, - {"search_opts": {}}, - {"search_opts": {"foo": "bar"}}, - ) - def test_list_share_servers(self, search_opts): - scenario = shares.ListShareServers(self.context) - scenario.context = {"admin": {"credential": "fake_credential"}} - scenario._list_share_servers = mock.MagicMock() - - scenario.run(search_opts=search_opts) - - scenario._list_share_servers.assert_called_once_with( - search_opts=search_opts) - - @ddt.data( - {"security_service_type": "fake_type"}, - {"security_service_type": "fake_type", - "dns_ip": "fake_dns_ip", - "server": "fake_server", - "domain": "fake_domain", - "user": "fake_user", - "password": "fake_password", - "description": "fake_description"}, - ) - def test_create_security_service_and_delete(self, params): - fake_ss = mock.MagicMock() - scenario = shares.CreateSecurityServiceAndDelete(self.context) - scenario._create_security_service = mock.MagicMock( - return_value=fake_ss) - scenario._delete_security_service = mock.MagicMock() - expected_params = { - "security_service_type": params.get("security_service_type"), - "dns_ip": params.get("dns_ip"), - "server": params.get("server"), - "domain": params.get("domain"), - "user": params.get("user"), - "password": params.get("password"), - "description": params.get("description"), - } - - scenario.run(**params) - - scenario._create_security_service.assert_called_once_with( - **expected_params) - scenario._delete_security_service.assert_called_once_with(fake_ss) - - @ddt.data("ldap", "kerberos", "active_directory") - def test_attach_security_service_to_share_network(self, - security_service_type): - scenario = 
shares.AttachSecurityServiceToShareNetwork(self.context) - scenario._create_share_network = mock.MagicMock() - scenario._create_security_service = mock.MagicMock() - scenario._add_security_service_to_share_network = mock.MagicMock() - - scenario.run(security_service_type=security_service_type) - - scenario._create_share_network.assert_called_once_with() - scenario._create_security_service.assert_called_once_with( - security_service_type=security_service_type) - scenario._add_security_service_to_share_network.assert_has_calls([ - mock.call(scenario._create_share_network.return_value, - scenario._create_security_service.return_value)]) - - @ddt.data( - {"share_proto": "nfs", "size": 3, "detailed": True}, - {"share_proto": "cifs", "size": 4, "detailed": False, - "share_network": "foo", "share_type": "bar"}, - ) - def test_create_and_list_share(self, params): - scenario = shares.CreateAndListShare() - scenario._create_share = mock.MagicMock() - scenario.sleep_between = mock.MagicMock() - scenario._list_shares = mock.MagicMock() - - scenario.run(min_sleep=3, max_sleep=4, **params) - - detailed = params.pop("detailed") - scenario._create_share.assert_called_once_with(**params) - scenario.sleep_between.assert_called_once_with(3, 4) - scenario._list_shares.assert_called_once_with(detailed=detailed) - - @ddt.data( - ({}, 0, 0), - ({}, 1, 1), - ({}, 2, 2), - ({}, 3, 0), - ({"sets": 5, "set_size": 8, "delete_size": 10}, 1, 1), - ) - @ddt.unpack - def test_set_and_delete_metadata(self, params, iteration, share_number): - scenario = shares.SetAndDeleteMetadata() - share_list = [{"id": "fake_share_%s_id" % d} for d in range(3)] - scenario.context = {"tenant": {"shares": share_list}} - scenario.context["iteration"] = iteration - scenario._set_metadata = mock.MagicMock() - scenario._delete_metadata = mock.MagicMock() - expected_set_params = { - "share": share_list[share_number], - "sets": params.get("sets", 10), - "set_size": params.get("set_size", 3), - "key_min_length": params.get("key_min_length", 1), - "key_max_length": params.get("key_max_length", 256), - "value_min_length": params.get("value_min_length", 1), - "value_max_length": params.get("value_max_length", 1024), - } - - scenario.run(**params) - - scenario._set_metadata.assert_called_once_with(**expected_set_params) - scenario._delete_metadata.assert_called_once_with( - share=share_list[share_number], - keys=scenario._set_metadata.return_value, - delete_size=params.get("delete_size", 3), - ) diff --git a/tests/unit/plugins/openstack/scenarios/manila/test_utils.py b/tests/unit/plugins/openstack/scenarios/manila/test_utils.py deleted file mode 100644 index 573f5c3bba..0000000000 --- a/tests/unit/plugins/openstack/scenarios/manila/test_utils.py +++ /dev/null @@ -1,464 +0,0 @@ -# Copyright 2015 Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
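The manila share tests removed above rely on ddt to multiply one test body into one generated test per @ddt.data entry, with @ddt.unpack spreading each dict into keyword arguments. A minimal sketch of that parametrization, assuming the ddt package is installed; CreateAndDeleteShare here is a simplified stand-in for the Rally scenario of the same name.

import unittest
from unittest import mock

import ddt


class CreateAndDeleteShare(object):
    """Simplified stand-in: create a share, then delete it."""

    def __init__(self, manila):
        self._manila = manila

    def run(self, share_proto, size):
        share = self._manila.create_share(share_proto, size)
        self._manila.delete_share(share)


@ddt.ddt
class CreateAndDeleteShareTestCase(unittest.TestCase):

    @ddt.data(
        {"share_proto": "nfs", "size": 3},
        {"share_proto": "cifs", "size": 4},
    )
    @ddt.unpack
    def test_run(self, share_proto, size):
        # ddt generates one test method per data entry; @ddt.unpack spreads
        # each dict into keyword arguments.
        manila = mock.MagicMock()
        fake_share = manila.create_share.return_value

        CreateAndDeleteShare(manila).run(share_proto=share_proto, size=size)

        manila.create_share.assert_called_once_with(share_proto, size)
        manila.delete_share.assert_called_once_with(fake_share)


if __name__ == "__main__":
    unittest.main()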
- -import ddt -import mock - -from rally import exceptions -from rally.plugins.openstack.context.manila import consts -from rally.plugins.openstack.scenarios.manila import utils -from tests.unit import test - -BM_UTILS = "rally.task.utils." - - -@ddt.ddt -class ManilaScenarioTestCase(test.ScenarioTestCase): - - def setUp(self): - super(ManilaScenarioTestCase, self).setUp() - self.scenario = utils.ManilaScenario(self.context) - - def test__create_share(self): - fake_share = mock.Mock() - self.clients("manila").shares.create.return_value = fake_share - self.scenario.context = { - "tenant": { - consts.SHARE_NETWORKS_CONTEXT_NAME: { - "share_networks": [{"id": "sn_1_id"}, {"id": "sn_2_id"}], - } - }, - "iteration": 0, - } - fake_random_name = "fake_random_name_value" - self.scenario.generate_random_name = mock.Mock( - return_value=fake_random_name) - - self.scenario._create_share("nfs") - - self.clients("manila").shares.create.assert_called_once_with( - "nfs", 1, name=fake_random_name, - share_network=self.scenario.context["tenant"][ - consts.SHARE_NETWORKS_CONTEXT_NAME]["share_networks"][0]["id"]) - - self.mock_wait_for_status.mock.assert_called_once_with( - fake_share, - ready_statuses=["available"], - update_resource=self.mock_get_from_manager.mock.return_value, - timeout=300, check_interval=3) - self.mock_get_from_manager.mock.assert_called_once_with() - - @mock.patch(BM_UTILS + "wait_for_status") - def test__delete_share(self, mock_wait_for_status): - fake_share = mock.MagicMock() - - self.scenario._delete_share(fake_share) - - fake_share.delete.assert_called_once_with() - mock_wait_for_status.assert_called_once_with( - fake_share, - ready_statuses=["deleted"], - check_deletion=True, - update_resource=self.mock_get_from_manager.mock.return_value, - timeout=180, check_interval=2) - self.mock_get_from_manager.mock.assert_called_once_with( - ("error_deleting", )) - - @ddt.data( - {}, - {"detailed": False, "search_opts": None}, - {"detailed": True, "search_opts": {"name": "foo_sn"}}, - {"search_opts": {"project_id": "fake_project"}}, - ) - def test__list_shares(self, params): - fake_shares = ["foo", "bar"] - self.clients("manila").shares.list.return_value = fake_shares - - result = self.scenario._list_shares(**params) - - self.assertEqual(fake_shares, result) - self.clients("manila").shares.list.assert_called_once_with( - detailed=params.get("detailed", True), - search_opts=params.get("search_opts")) - - @ddt.data( - {"new_size": 5}, - {"new_size": 10} - ) - def test__extend_share(self, new_size): - fake_share = mock.MagicMock() - - self.scenario._extend_share(fake_share, new_size) - - fake_share.extend.assert_called_with(new_size) - - self.mock_wait_for_status.mock.assert_called_once_with( - fake_share, - ready_statuses=["available"], - update_resource=self.mock_get_from_manager.mock.return_value, - timeout=300, check_interval=3) - self.mock_get_from_manager.mock.assert_called_once_with() - - @ddt.data( - {"new_size": 5}, - {"new_size": 10} - ) - def test__shrink_share(self, new_size): - fake_share = mock.MagicMock() - - self.scenario._shrink_share(fake_share, new_size) - - fake_share.shrink.assert_called_with(new_size) - - self.mock_wait_for_status.mock.assert_called_once_with( - fake_share, - ready_statuses=["available"], - update_resource=self.mock_get_from_manager.mock.return_value, - timeout=300, check_interval=3) - self.mock_get_from_manager.mock.assert_called_once_with() - - @ddt.data( - { - "access_type": "ip", - "access": "1.2.3.4", - "access_level": "rw", - "access_id": "foo" - }, 
- { - "access_type": "domain", - "access": "4.3.2.1", - "access_level": "ro", - "access_id": "bar" - } - ) - @ddt.unpack - def test__allow_access_share(self, access_type, access, access_level, - access_id): - fake_allow_result = {"id": access_id} - fake_access = mock.MagicMock() - fake_access.id = access_id - fake_update = mock.MagicMock() - self.scenario._update_resource_in_allow_access_share = mock.MagicMock( - return_value=fake_update) - - fake_share = mock.MagicMock() - fake_share.allow.return_value = fake_allow_result - fake_share.access_list.return_value = [fake_access] - - self.assertEqual(self.scenario._allow_access_share( - fake_share, access_type, access, access_level), fake_allow_result) - - self.scenario._update_resource_in_allow_access_share \ - .assert_called_with(fake_share, access_id) - self.mock_wait_for_status.mock.assert_called_once_with( - fake_access, - ready_statuses=["active"], - update_resource=fake_update, - check_interval=3.0, - timeout=300.0) - - def test__get_access_from_share_with_no_access_in_share(self): - access_id = "foo" - fake_share = mock.MagicMock() - fake_access = mock.MagicMock() - fake_access.id = access_id - fake_share.access_list.return_value = [] - - self.assertRaises(exceptions.GetResourceNotFound, - self.scenario._get_access_from_share, - fake_share, access_id) - - def test__get_access_from_share(self): - access_id = "foo" - fake_share = mock.MagicMock() - fake_access = mock.MagicMock() - fake_access.id = access_id - fake_share.access_list.return_value = [fake_access] - - access = self.scenario._get_access_from_share(fake_share, access_id) - - self.assertEqual(access, fake_access) - - def test__update_resource_in_allow_access_share(self): - access_id = "foo" - fake_share = mock.MagicMock() - fake_resource = mock.MagicMock() - fake_access = mock.MagicMock() - fake_access.id = access_id - fake_share.access_list.return_value = [fake_access] - - fn = self.scenario._update_resource_in_allow_access_share( - fake_share, access_id) - - self.assertEqual(fn(fake_resource), fake_access) - - def test__deny_access_share(self): - access_id = "foo" - fake_access = mock.MagicMock() - fake_access.id = access_id - fake_update = mock.MagicMock() - self.scenario._update_resource_in_deny_access_share = mock.MagicMock( - return_value=fake_update) - - fake_share = mock.MagicMock() - fake_share.access_list.return_value = [fake_access] - - self.scenario._deny_access_share(fake_share, access_id) - - self.scenario._update_resource_in_deny_access_share \ - .assert_called_with(fake_share, access_id) - - self.mock_wait_for_status.mock.assert_called_once_with( - fake_access, - check_deletion=True, - ready_statuses=["deleted"], - update_resource=fake_update, - check_interval=2.0, - timeout=180.0) - - def test__update_resource_in_deny_access_share(self): - access_id = "foo" - fake_share = mock.MagicMock() - fake_resource = mock.MagicMock() - fake_access = mock.MagicMock() - fake_access.id = access_id - fake_share.access_list.return_value = [fake_access] - - fn = self.scenario._update_resource_in_deny_access_share( - fake_share, access_id) - - assert fn(fake_resource) == fake_access - - def test__update_resource_in_deny_access_share_with_deleted_resource(self): - access_id = "foo" - fake_share = mock.MagicMock() - fake_resource = mock.MagicMock() - fake_access = mock.MagicMock() - fake_access.access_id = access_id - fake_share.access_list.return_value = [] - - fn = self.scenario._update_resource_in_deny_access_share( - fake_share, access_id) - - 
self.assertRaises(exceptions.GetResourceNotFound, - fn, fake_resource) - - def test__create_share_network(self): - fake_sn = mock.Mock() - self.scenario.generate_random_name = mock.Mock() - self.clients("manila").share_networks.create.return_value = fake_sn - data = { - "neutron_net_id": "fake_neutron_net_id", - "neutron_subnet_id": "fake_neutron_subnet_id", - "nova_net_id": "fake_nova_net_id", - "description": "fake_description", - } - expected = dict(data) - expected["name"] = self.scenario.generate_random_name.return_value - - result = self.scenario._create_share_network(**data) - - self.assertEqual(fake_sn, result) - self.clients("manila").share_networks.create.assert_called_once_with( - **expected) - - @mock.patch(BM_UTILS + "wait_for_status") - def test__delete_share_network(self, mock_wait_for_status): - fake_sn = mock.MagicMock() - - self.scenario._delete_share_network(fake_sn) - - fake_sn.delete.assert_called_once_with() - mock_wait_for_status.assert_called_once_with( - fake_sn, - ready_statuses=["deleted"], - check_deletion=True, - update_resource=self.mock_get_from_manager.mock.return_value, - timeout=180, check_interval=2) - self.mock_get_from_manager.mock.assert_called_once_with() - - @ddt.data( - {"detailed": True, "search_opts": {"name": "foo_sn"}}, - {"detailed": False, "search_opts": None}, - {}, - {"search_opts": {"project_id": "fake_project"}}, - ) - def test__list_share_networks(self, params): - fake_share_networks = ["foo", "bar"] - self.clients("manila").share_networks.list.return_value = ( - fake_share_networks) - - result = self.scenario._list_share_networks(**params) - - self.assertEqual(fake_share_networks, result) - self.clients("manila").share_networks.list.assert_called_once_with( - detailed=params.get("detailed", True), - search_opts=params.get("search_opts")) - - @ddt.data( - {}, - {"search_opts": None}, - {"search_opts": {"project_id": "fake_project"}}, - ) - def test__list_share_servers(self, params): - fake_share_servers = ["foo", "bar"] - self.admin_clients("manila").share_servers.list.return_value = ( - fake_share_servers) - - result = self.scenario._list_share_servers(**params) - - self.assertEqual(fake_share_servers, result) - self.admin_clients( - "manila").share_servers.list.assert_called_once_with( - search_opts=params.get("search_opts")) - - @ddt.data("ldap", "kerberos", "active_directory") - def test__create_security_service(self, ss_type): - fake_ss = mock.Mock() - self.clients("manila").security_services.create.return_value = fake_ss - self.scenario.generate_random_name = mock.Mock() - data = { - "security_service_type": ss_type, - "dns_ip": "fake_dns_ip", - "server": "fake_server", - "domain": "fake_domain", - "user": "fake_user", - "password": "fake_password", - "description": "fake_description", - } - expected = dict(data) - expected["type"] = expected.pop("security_service_type") - expected["name"] = self.scenario.generate_random_name.return_value - - result = self.scenario._create_security_service(**data) - - self.assertEqual(fake_ss, result) - self.clients( - "manila").security_services.create.assert_called_once_with( - **expected) - - @mock.patch(BM_UTILS + "wait_for_status") - def test__delete_security_service(self, mock_wait_for_status): - fake_ss = mock.MagicMock() - - self.scenario._delete_security_service(fake_ss) - - fake_ss.delete.assert_called_once_with() - mock_wait_for_status.assert_called_once_with( - fake_ss, - ready_statuses=["deleted"], - check_deletion=True, - update_resource=self.mock_get_from_manager.mock.return_value, - 
timeout=180, check_interval=2) - self.mock_get_from_manager.mock.assert_called_once_with() - - def test__add_security_service_to_share_network(self): - fake_sn = mock.MagicMock() - fake_ss = mock.MagicMock() - - result = self.scenario._add_security_service_to_share_network( - share_network=fake_sn, security_service=fake_ss) - - self.assertEqual( - self.clients( - "manila").share_networks.add_security_service.return_value, - result) - self.clients( - "manila").share_networks.add_security_service.assert_has_calls([ - mock.call(fake_sn, fake_ss)]) - - @ddt.data( - {"key_min_length": 5, "key_max_length": 4}, - {"value_min_length": 5, "value_max_length": 4}, - ) - def test__set_metadata_wrong_params(self, params): - self.assertRaises( - exceptions.InvalidArgumentsException, - self.scenario._set_metadata, - {"id": "fake_share_id"}, **params) - - @ddt.data( - {}, - {"sets": 0, "set_size": 1}, - {"sets": 1, "set_size": 1}, - {"sets": 5, "set_size": 7}, - {"sets": 5, "set_size": 2}, - {"key_min_length": 1, "key_max_length": 1}, - {"key_min_length": 1, "key_max_length": 2}, - {"key_min_length": 256, "key_max_length": 256}, - {"value_min_length": 1, "value_max_length": 1}, - {"value_min_length": 1, "value_max_length": 2}, - {"value_min_length": 1024, "value_max_length": 1024}, - ) - def test__set_metadata(self, params): - share = {"id": "fake_share_id"} - sets = params.get("sets", 1) - set_size = params.get("set_size", 1) - gen_name_calls = sets * set_size * 2 - data = range(gen_name_calls) - generator_data = iter(data) - - def fake_random_name(prefix="fake", length="fake"): - return next(generator_data) - - scenario = self.scenario - scenario.clients = mock.MagicMock() - scenario._generate_random_part = mock.MagicMock( - side_effect=fake_random_name) - - keys = scenario._set_metadata(share, **params) - - self.assertEqual( - gen_name_calls, - scenario._generate_random_part.call_count) - self.assertEqual( - params.get("sets", 1), - scenario.clients.return_value.shares.set_metadata.call_count) - scenario.clients.return_value.shares.set_metadata.assert_has_calls([ - mock.call( - share["id"], - dict([(j, j + 1) for j in data[ - i * set_size * 2: (i + 1) * set_size * 2: 2]]) - ) for i in range(sets) - ]) - self.assertEqual([i for i in range(0, gen_name_calls, 2)], keys) - - @ddt.data(None, [], {"fake_set"}, {"fake_key": "fake_value"}) - def test__delete_metadata_wrong_params(self, keys): - self.assertRaises( - exceptions.InvalidArgumentsException, - self.scenario._delete_metadata, - "fake_share", keys=keys, - ) - - @ddt.data( - {"keys": [i for i in range(30)]}, - {"keys": list(range(7)), "delete_size": 2}, - {"keys": list(range(7)), "delete_size": 3}, - {"keys": list(range(7)), "delete_size": 4}, - ) - def test__delete_metadata(self, params): - share = {"id": "fake_share_id"} - delete_size = params.get("delete_size", 3) - keys = params.get("keys", []) - scenario = self.scenario - scenario.clients = mock.MagicMock() - - scenario._delete_metadata(share, **params) - - scenario.clients.return_value.shares.delete_metadata.assert_has_calls([ - mock.call(share["id"], keys[i:i + delete_size]) - for i in range(0, len(keys), delete_size) - ]) diff --git a/tests/unit/plugins/openstack/scenarios/mistral/__init__.py b/tests/unit/plugins/openstack/scenarios/mistral/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/scenarios/mistral/test_executions.py b/tests/unit/plugins/openstack/scenarios/mistral/test_executions.py deleted file mode 100644 index 
dbd49fa689..0000000000 --- a/tests/unit/plugins/openstack/scenarios/mistral/test_executions.py +++ /dev/null @@ -1,192 +0,0 @@ -# Copyright 2016: Nokia Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.plugins.openstack.scenarios.mistral import executions -from tests.unit import test - -BASE = "rally.plugins.openstack.scenarios.mistral.executions" -MISTRAL_WBS_BASE = "rally.plugins.openstack.scenarios.mistral.workbooks" - - -WB_DEFINITION = """--- -version: 2.0 -name: wb -workflows: - wf1: - type: direct - tasks: - noop_task: - action: std.noop - wf2: - type: direct - tasks: - noop_task: - action: std.noop - wf3: - type: direct - tasks: - noop_task: - action: std.noop - wf4: - type: direct - tasks: - noop_task: - action: std.noop -""" - -WB_DEF_ONE_WF = """--- -version: 2.0 -name: wb -workflows: - wf1: - type: direct - tasks: - noop_task: - action: std.noop -""" - -PARAMS_EXAMPLE = {"env": {"env_param": "env_param_value"}} -INPUT_EXAMPLE = """{"input1": "value1", "some_json_input": {"a": "b"}}""" - -WB = type("obj", (object,), {"name": "wb", "definition": WB_DEFINITION})() -WB_ONE_WF = ( - type("obj", (object,), {"name": "wb", "definition": WB_DEF_ONE_WF})() -) - - -class MistralExecutionsTestCase(test.ScenarioTestCase): - - @mock.patch("%s.ListExecutions._list_executions" % BASE) - def test_list_executions(self, mock__list_executions): - executions.ListExecutions(self.context).run() - self.assertEqual(1, mock__list_executions.called) - - @mock.patch("%s.CreateExecutionFromWorkbook._create_execution" % BASE) - @mock.patch("%s.CreateExecutionFromWorkbook._create_workbook" % BASE, - return_value=WB) - def test_create_execution(self, mock__create_workbook, - mock__create_execution): - - executions.CreateExecutionFromWorkbook(self.context).run(WB_DEFINITION) - - self.assertEqual(1, mock__create_workbook.called) - self.assertEqual(1, mock__create_execution.called) - - @mock.patch("%s.CreateExecutionFromWorkbook._create_execution" % BASE) - @mock.patch("%s.CreateExecutionFromWorkbook._create_workbook" % BASE, - return_value=WB) - def test_create_execution_with_input(self, mock__create_workbook, - mock__create_execution): - - executions.CreateExecutionFromWorkbook(self.context).run( - WB_DEFINITION, wf_input=INPUT_EXAMPLE) - - self.assertEqual(1, mock__create_workbook.called) - self.assertEqual(1, mock__create_execution.called) - - @mock.patch("%s.CreateExecutionFromWorkbook._create_execution" % BASE) - @mock.patch("%s.CreateExecutionFromWorkbook._create_workbook" % BASE, - return_value=WB) - @mock.patch("json.loads", return_value=PARAMS_EXAMPLE) - def test_create_execution_with_params(self, mock_loads, - mock__create_workbook, - mock__create_execution): - - executions.CreateExecutionFromWorkbook(self.context).run( - WB_DEFINITION, params=str(PARAMS_EXAMPLE)) - - self.assertEqual(1, mock_loads.called) - self.assertEqual(1, mock__create_workbook.called) - self.assertEqual(1, mock__create_execution.called) - - 
@mock.patch("%s.CreateExecutionFromWorkbook._create_execution" % BASE) - @mock.patch("%s.CreateExecutionFromWorkbook._create_workbook" % BASE, - return_value=WB) - def test_create_execution_with_wf_name(self, mock__create_workbook, - mock__create_execution): - - executions.CreateExecutionFromWorkbook(self.context).run( - WB_DEFINITION, "wf4") - - self.assertEqual(1, mock__create_workbook.called) - self.assertEqual(1, mock__create_execution.called) - - # we concatenate workbook name with the workflow name in the test - # the workbook name is not random because we mock the method that - # adds the random part - mock__create_execution.assert_called_once_with("wb.wf4", None,) - - @mock.patch("%s.CreateExecutionFromWorkbook._delete_execution" % BASE) - @mock.patch("%s.CreateExecutionFromWorkbook._delete_workbook" % BASE) - @mock.patch("%s.CreateExecutionFromWorkbook._create_execution" % BASE) - @mock.patch("%s.CreateExecutionFromWorkbook._create_workbook" % BASE, - return_value=WB) - def test_create_delete_execution( - self, mock__create_workbook, mock__create_execution, - mock__delete_workbook, mock__delete_execution): - - executions.CreateExecutionFromWorkbook(self.context).run( - WB_DEFINITION, do_delete=True) - - self.assertEqual(1, mock__create_workbook.called) - self.assertEqual(1, mock__create_execution.called) - self.assertEqual(1, mock__delete_workbook.called) - self.assertEqual(1, mock__delete_execution.called) - - @mock.patch("%s.CreateExecutionFromWorkbook._delete_execution" % BASE) - @mock.patch("%s.CreateExecutionFromWorkbook._delete_workbook" % BASE) - @mock.patch("%s.CreateExecutionFromWorkbook._create_execution" % BASE) - @mock.patch("%s.CreateExecutionFromWorkbook._create_workbook" % BASE, - return_value=WB) - def test_create_delete_execution_with_wf_name( - self, mock__create_workbook, mock__create_execution, - mock__delete_workbook, mock__delete_execution): - - executions.CreateExecutionFromWorkbook(self.context).run( - WB_DEFINITION, "wf4", do_delete=True) - - self.assertEqual(1, mock__create_workbook.called) - self.assertEqual(1, mock__create_execution.called) - self.assertEqual(1, mock__delete_workbook.called) - self.assertEqual(1, mock__delete_execution.called) - - # we concatenate workbook name with the workflow name in the test - # the workbook name is not random because we mock the method that - # adds the random part - mock__create_execution.assert_called_once_with("wb.wf4", None) - - @mock.patch("%s.CreateExecutionFromWorkbook._delete_execution" % BASE) - @mock.patch("%s.CreateExecutionFromWorkbook._delete_workbook" % BASE) - @mock.patch("%s.CreateExecutionFromWorkbook._create_execution" % BASE) - @mock.patch("%s.CreateExecutionFromWorkbook._create_workbook" % BASE, - return_value=WB_ONE_WF) - def test_create_delete_execution_without_wf_name( - self, mock__create_workbook, mock__create_execution, - mock__delete_workbook, mock__delete_execution): - - executions.CreateExecutionFromWorkbook(self.context).run( - WB_DEF_ONE_WF, do_delete=True) - - self.assertEqual(1, mock__create_workbook.called) - self.assertEqual(1, mock__create_execution.called) - self.assertEqual(1, mock__delete_workbook.called) - self.assertEqual(1, mock__delete_execution.called) - - # we concatenate workbook name with the workflow name in the test - # the workbook name is not random because we mock the method that - # adds the random part - mock__create_execution.assert_called_once_with("wb.wf1", None) diff --git a/tests/unit/plugins/openstack/scenarios/mistral/test_utils.py 
b/tests/unit/plugins/openstack/scenarios/mistral/test_utils.py deleted file mode 100644 index 0b9a5a961b..0000000000 --- a/tests/unit/plugins/openstack/scenarios/mistral/test_utils.py +++ /dev/null @@ -1,156 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from rally.plugins.openstack.scenarios.mistral import utils -from tests.unit import fakes -from tests.unit import test - -MISTRAL_UTILS = "rally.plugins.openstack.scenarios.mistral.utils" -PARAMS_EXAMPLE = {"env": {"env_param": "param_value"}} -INPUT_EXAMPLE = """{"input1": "value1", "some_json_input": {"a": "b"}}""" - - -class MistralScenarioTestCase(test.ScenarioTestCase): - - def test_list_workbooks(self): - scenario = utils.MistralScenario(context=self.context) - return_wbs_list = scenario._list_workbooks() - self.assertEqual( - self.clients("mistral").workbooks.list.return_value, - return_wbs_list) - self._test_atomic_action_timer( - scenario.atomic_actions(), - "mistral.list_workbooks" - ) - - def test_create_workbook(self): - definition = "version: \"2.0\"\nname: wb" - scenario = utils.MistralScenario(context=self.context) - self.assertEqual( - self.clients("mistral").workbooks.create.return_value, - scenario._create_workbook(definition) - ) - self._test_atomic_action_timer( - scenario.atomic_actions(), - "mistral.create_workbook" - ) - - def test_delete_workbook(self): - scenario = utils.MistralScenario(context=self.context) - scenario._delete_workbook("wb_name") - self.clients("mistral").workbooks.delete.assert_called_once_with( - "wb_name" - ) - self._test_atomic_action_timer( - scenario.atomic_actions(), - "mistral.delete_workbook" - ) - - def test_list_executions(self): - scenario = utils.MistralScenario(context=self.context) - return_executions_list = scenario._list_executions() - self.assertEqual( - return_executions_list, - self.clients("mistral").executions.list.return_value - ) - self._test_atomic_action_timer( - scenario.atomic_actions(), - "mistral.list_executions" - ) - - def test_create_execution(self): - scenario = utils.MistralScenario(context=self.context) - - mock_wait_for_status = self.mock_wait_for_status.mock - wf_name = "fake_wf_name" - mock_create_exec = self.clients("mistral").executions.create - - self.assertEqual( - mock_wait_for_status.return_value, - scenario._create_execution("%s" % wf_name) - ) - - mock_create_exec.assert_called_once_with(wf_name, workflow_input=None) - - args, kwargs = mock_wait_for_status.call_args - self.assertEqual(mock_create_exec.return_value, args[0]) - self.assertEqual(["ERROR"], kwargs["failure_statuses"]) - self.assertEqual(["SUCCESS"], kwargs["ready_statuses"]) - self._test_atomic_action_timer( - scenario.atomic_actions(), - "mistral.create_execution" - ) - - def test_create_execution_with_input(self): - scenario = utils.MistralScenario(context=self.context) - - mock_wait_for_status = self.mock_wait_for_status.mock - wf_name = "fake_wf_name" - mock_create_exec = self.clients("mistral").executions.create - - 
self.assertEqual( - mock_wait_for_status.return_value, - scenario._create_execution( - wf_name, wf_input=str(INPUT_EXAMPLE)) - ) - - mock_create_exec.assert_called_once_with(wf_name, - workflow_input=INPUT_EXAMPLE) - - def test_create_execution_with_params(self): - scenario = utils.MistralScenario(context=self.context) - - mock_wait_for_status = self.mock_wait_for_status.mock - wf_name = "fake_wf_name" - mock_create_exec = self.clients("mistral").executions.create - - self.assertEqual( - mock_wait_for_status.return_value, - scenario._create_execution( - wf_name, **PARAMS_EXAMPLE) - ) - mock_create_exec.assert_called_once_with(wf_name, workflow_input=None, - **PARAMS_EXAMPLE) - - args, kwargs = mock_wait_for_status.call_args - self.assertEqual(mock_create_exec.return_value, args[0]) - self.assertEqual(["ERROR"], kwargs["failure_statuses"]) - self.assertEqual(["SUCCESS"], kwargs["ready_statuses"]) - self._test_atomic_action_timer( - scenario.atomic_actions(), - "mistral.create_execution" - ) - - args, kwargs = mock_wait_for_status.call_args - self.assertEqual(mock_create_exec.return_value, args[0]) - self.assertEqual(["ERROR"], kwargs["failure_statuses"]) - self.assertEqual(["SUCCESS"], kwargs["ready_statuses"]) - self._test_atomic_action_timer( - scenario.atomic_actions(), - "mistral.create_execution" - ) - - def test_delete_execution(self): - scenario = utils.MistralScenario(context=self.context) - execution = fakes.FakeMistralClient().execution.create() - scenario._delete_execution(execution) - self.clients("mistral").executions.delete.assert_called_once_with( - execution.id - ) - self._test_atomic_action_timer( - scenario.atomic_actions(), - "mistral.delete_execution" - ) diff --git a/tests/unit/plugins/openstack/scenarios/mistral/test_workbooks.py b/tests/unit/plugins/openstack/scenarios/mistral/test_workbooks.py deleted file mode 100644 index 4309c735ce..0000000000 --- a/tests/unit/plugins/openstack/scenarios/mistral/test_workbooks.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from rally.plugins.openstack.scenarios.mistral import workbooks -from tests.unit import test - -BASE = "rally.plugins.openstack.scenarios.mistral.workbooks" - - -class MistralWorkbooksTestCase(test.ScenarioTestCase): - - @mock.patch("%s.ListWorkbooks._list_workbooks" % BASE) - def test_list_workbooks(self, mock_list_workbooks__list_workbooks): - workbooks.ListWorkbooks(self.context).run() - mock_list_workbooks__list_workbooks.assert_called_once_with() - - @mock.patch("%s.CreateWorkbook._create_workbook" % BASE) - def test_create_workbook(self, mock_create_workbook__create_workbook): - definition = "---\nversion: \"2.0\"\nname: wb" - fake_wb = mock.MagicMock() - fake_wb.name = "wb" - mock_create_workbook__create_workbook.return_value = fake_wb - workbooks.CreateWorkbook(self.context).run(definition) - - self.assertEqual(1, mock_create_workbook__create_workbook.called) - - @mock.patch("%s.CreateWorkbook._delete_workbook" % BASE) - @mock.patch("%s.CreateWorkbook._create_workbook" % BASE) - def test_create_delete_workbook(self, - mock_create_workbook__create_workbook, - mock_create_workbook__delete_workbook): - definition = "---\nversion: \"2.0\"\nname: wb" - fake_wb = mock.MagicMock() - fake_wb.name = "wb" - mock_create_workbook__create_workbook.return_value = fake_wb - - workbooks.CreateWorkbook(self.context).run(definition, do_delete=True) - - self.assertTrue(mock_create_workbook__create_workbook.called) - mock_create_workbook__delete_workbook.assert_called_once_with( - fake_wb.name) diff --git a/tests/unit/plugins/openstack/scenarios/monasca/__init__.py b/tests/unit/plugins/openstack/scenarios/monasca/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/scenarios/monasca/test_metrics.py b/tests/unit/plugins/openstack/scenarios/monasca/test_metrics.py deleted file mode 100644 index 2c407ae4e9..0000000000 --- a/tests/unit/plugins/openstack/scenarios/monasca/test_metrics.py +++ /dev/null @@ -1,35 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import mock - -from rally.plugins.openstack.scenarios.monasca import metrics -from tests.unit import test - - -@ddt.ddt -class MonascaMetricsTestCase(test.ScenarioTestCase): - - @ddt.data( - {"region": None}, - {"region": "fake_region"}, - ) - @ddt.unpack - def test_list_metrics(self, region=None): - scenario = metrics.ListMetrics(self.context) - self.region = region - scenario._list_metrics = mock.MagicMock() - scenario.run(region=self.region) - scenario._list_metrics.assert_called_once_with(region=self.region) diff --git a/tests/unit/plugins/openstack/scenarios/monasca/test_utils.py b/tests/unit/plugins/openstack/scenarios/monasca/test_utils.py deleted file mode 100644 index 18891b8e03..0000000000 --- a/tests/unit/plugins/openstack/scenarios/monasca/test_utils.py +++ /dev/null @@ -1,51 +0,0 @@ -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt - -from rally.plugins.openstack.scenarios.monasca import utils -from tests.unit import test - - -@ddt.ddt -class MonascaScenarioTestCase(test.ScenarioTestCase): - - def setUp(self): - super(MonascaScenarioTestCase, self).setUp() - self.scenario = utils.MonascaScenario(self.context) - self.kwargs = { - "dimensions": { - "region": "fake_region", - "hostname": "fake_host_name", - "service": "fake_service", - "url": "fake_url" - } - } - - def test_list_metrics(self): - return_metric_value = self.scenario._list_metrics() - self.assertEqual(return_metric_value, - self.clients("monasca").metrics.list.return_value) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "monasca.list_metrics") - - @ddt.data( - {"name": ""}, - {"name": "fake_metric"}, - ) - @ddt.unpack - def test_create_metrics(self, name=None): - self.name = name - self.scenario._create_metrics(name=self.name, kwargs=self.kwargs) - self.assertEqual(1, self.clients("monasca").metrics.create.call_count) diff --git a/tests/unit/plugins/openstack/scenarios/murano/__init__.py b/tests/unit/plugins/openstack/scenarios/murano/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/scenarios/murano/test_environments.py b/tests/unit/plugins/openstack/scenarios/murano/test_environments.py deleted file mode 100644 index 4b3933a6db..0000000000 --- a/tests/unit/plugins/openstack/scenarios/murano/test_environments.py +++ /dev/null @@ -1,126 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.plugins.openstack.scenarios.murano import environments -from tests.unit import test - -MURANO_SCENARIO = ("rally.plugins.openstack.scenarios.murano." 
- "environments") - - -class MuranoEnvironmentsTestCase(test.ScenarioTestCase): - - def _get_context(self): - self.context.update({ - "tenant": { - "packages": [mock.MagicMock(fully_qualified_name="fake")] - }, - "user": { - "tenant_id": "fake_tenant_id" - }, - "config": { - "murano_packages": { - "app_package": ( - "rally-jobs/extra/murano/" - "applications/HelloReporter/" - "io.murano.apps.HelloReporter.zip") - } - } - }) - return self.context - - def test_list_environments(self): - TEST_TARGET = "ListEnvironments" - list_env_module = ("{}.{}.{}").format(MURANO_SCENARIO, - TEST_TARGET, - "_list_environments") - scenario = environments.ListEnvironments(self.context) - with mock.patch(list_env_module) as mock_list_env: - scenario.run() - mock_list_env.assert_called_once_with() - - def test_create_and_delete_environment(self): - TEST_TARGET = "CreateAndDeleteEnvironment" - generate_random_name_module = ("{}.{}.{}").format( - MURANO_SCENARIO, TEST_TARGET, "generate_random_name") - create_env_module = ("{}.{}.{}").format(MURANO_SCENARIO, - TEST_TARGET, - "_create_environment") - create_session_module = ("{}.{}.{}").format(MURANO_SCENARIO, - TEST_TARGET, - "_create_session") - delete_env_module = ("{}.{}.{}").format(MURANO_SCENARIO, - TEST_TARGET, - "_delete_environment") - scenario = environments.CreateAndDeleteEnvironment(self.context) - with mock.patch(generate_random_name_module) as mock_random_name: - with mock.patch(create_env_module) as mock_create_env: - with mock.patch(create_session_module) as mock_create_session: - with mock.patch(delete_env_module) as mock_delete_env: - fake_env = mock.Mock(id="fake_id") - mock_create_env.return_value = fake_env - mock_random_name.return_value = "foo" - scenario.run() - mock_create_env.assert_called_once_with() - mock_create_session.assert_called_once_with( - fake_env.id) - mock_delete_env.assert_called_once_with( - fake_env) - - def test_create_and_deploy_environment(self): - TEST_TARGET = "CreateAndDeployEnvironment" - create_env_module = ("{}.{}.{}").format(MURANO_SCENARIO, - TEST_TARGET, - "_create_environment") - create_session_module = ("{}.{}.{}").format(MURANO_SCENARIO, - TEST_TARGET, - "_create_session") - create_service_module = ("{}.{}.{}").format(MURANO_SCENARIO, - TEST_TARGET, - "_create_service") - deploy_env_module = ("{}.{}.{}").format(MURANO_SCENARIO, - TEST_TARGET, - "_deploy_environment") - scenario = environments.CreateAndDeployEnvironment(self.context) - with mock.patch(create_env_module) as mock_create_env: - with mock.patch(create_session_module) as mock_create_session: - with mock.patch(create_service_module) as mock_create_service: - with mock.patch(deploy_env_module) as mock_deploy_env: - fake_env = mock.MagicMock(id="fake_env_id") - mock_create_env.return_value = fake_env - - fake_session = mock.Mock(id="fake_session_id") - mock_create_session.return_value = fake_session - - scenario.context = self._get_context() - scenario.context["tenants"] = { - "fake_tenant_id": { - "packages": [mock.MagicMock()] - } - } - - scenario.run(1) - - mock_create_env.assert_called_once_with() - mock_create_session.assert_called_once_with( - fake_env.id) - mock_create_service.assert_called_once_with( - fake_env, - fake_session, - "fake") - mock_deploy_env.assert_called_once_with( - fake_env, fake_session) diff --git a/tests/unit/plugins/openstack/scenarios/murano/test_packages.py b/tests/unit/plugins/openstack/scenarios/murano/test_packages.py deleted file mode 100644 index 160dc34979..0000000000 --- 
a/tests/unit/plugins/openstack/scenarios/murano/test_packages.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright 2015: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.plugins.openstack.scenarios.murano import packages -from tests.unit import test - -MURANO_SCENARIO = ("rally.plugins.openstack.scenarios.murano." - "packages.MuranoPackages") - - -class MuranoPackagesTestCase(test.TestCase): - - def setUp(self): - super(MuranoPackagesTestCase, self).setUp() - self.mock_remove = mock.patch("os.remove") - self.mock_remove.start() - - def tearDown(self): - super(MuranoPackagesTestCase, self).tearDown() - self.mock_remove.stop() - - def mock_modules(self, scenario): - scenario._import_package = mock.Mock() - scenario._zip_package = mock.Mock() - scenario._list_packages = mock.Mock() - scenario._delete_package = mock.Mock() - scenario._update_package = mock.Mock() - scenario._filter_applications = mock.Mock() - - def test_make_zip_import_and_list_packages(self): - scenario = packages.ImportAndListPackages() - self.mock_modules(scenario) - scenario.run("foo_package.zip") - scenario._import_package.assert_called_once_with( - scenario._zip_package.return_value) - scenario._zip_package.assert_called_once_with("foo_package.zip") - scenario._list_packages.assert_called_once_with( - include_disabled=False) - - def test_import_and_delete_package(self): - scenario = packages.ImportAndDeletePackage() - self.mock_modules(scenario) - fake_package = mock.Mock() - scenario._import_package.return_value = fake_package - scenario.run("foo_package.zip") - scenario._import_package.assert_called_once_with( - scenario._zip_package.return_value) - scenario._delete_package.assert_called_once_with(fake_package) - - def test_package_lifecycle(self): - scenario = packages.PackageLifecycle() - self.mock_modules(scenario) - fake_package = mock.Mock() - scenario._import_package.return_value = fake_package - scenario.run("foo_package.zip", {"category": "Web"}, "add") - scenario._import_package.assert_called_once_with( - scenario._zip_package.return_value) - scenario._update_package.assert_called_once_with( - fake_package, {"category": "Web"}, "add") - scenario._delete_package.assert_called_once_with(fake_package) - - def test_import_and_filter_applications(self): - scenario = packages.ImportAndFilterApplications() - self.mock_modules(scenario) - fake_package = mock.Mock() - scenario._import_package.return_value = fake_package - scenario.run("foo_package.zip", {"category": "Web"}) - scenario._import_package.assert_called_once_with( - scenario._zip_package.return_value) - scenario._filter_applications.assert_called_once_with( - {"category": "Web"} - ) diff --git a/tests/unit/plugins/openstack/scenarios/murano/test_utils.py b/tests/unit/plugins/openstack/scenarios/murano/test_utils.py deleted file mode 100644 index 311dd69c70..0000000000 --- a/tests/unit/plugins/openstack/scenarios/murano/test_utils.py +++ /dev/null @@ -1,227 +0,0 @@ -# Copyright 2015: Mirantis Inc. 
-# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.common import cfg -from rally.plugins.openstack.scenarios.murano import utils -from tests.unit import test - -MRN_UTILS = "rally.plugins.openstack.scenarios.murano.utils" -CONF = cfg.CONF - - -class MuranoScenarioTestCase(test.ScenarioTestCase): - - def test_list_environments(self): - self.clients("murano").environments.list.return_value = [] - scenario = utils.MuranoScenario(context=self.context) - return_environments_list = scenario._list_environments() - self.assertEqual([], return_environments_list) - self._test_atomic_action_timer(scenario.atomic_actions(), - "murano.list_environments") - - def test_create_environments(self): - self.clients("murano").environments.create = mock.Mock() - scenario = utils.MuranoScenario(context=self.context) - scenario.generate_random_name = mock.Mock() - - create_env = scenario._create_environment() - self.assertEqual( - create_env, - self.clients("murano").environments.create.return_value) - self.clients("murano").environments.create.assert_called_once_with( - {"name": scenario.generate_random_name.return_value}) - self._test_atomic_action_timer(scenario.atomic_actions(), - "murano.create_environment") - - def test_delete_environment(self): - environment = mock.Mock(id="id") - self.clients("murano").environments.delete.return_value = "ok" - scenario = utils.MuranoScenario(context=self.context) - scenario._delete_environment(environment) - self.clients("murano").environments.delete.assert_called_once_with( - environment.id - ) - - def test_create_session(self): - self.clients("murano").sessions.configure.return_value = "sess" - scenario = utils.MuranoScenario(context=self.context) - create_sess = scenario._create_session("id") - self.assertEqual("sess", create_sess) - self._test_atomic_action_timer(scenario.atomic_actions(), - "murano.create_session") - - def test__create_service(self,): - self.clients("murano").services.post.return_value = "app" - mock_env = mock.Mock(id="ip") - mock_sess = mock.Mock(id="ip") - scenario = utils.MuranoScenario(context=self.context) - - create_app = scenario._create_service(mock_env, mock_sess, - "fake_full_name") - - self.assertEqual("app", create_app) - self._test_atomic_action_timer(scenario.atomic_actions(), - "murano.create_service") - - def test_deploy_environment(self): - environment = mock.Mock(id="id") - session = mock.Mock(id="id") - self.clients("murano").sessions.deploy.return_value = "ok" - scenario = utils.MuranoScenario(context=self.context) - scenario._deploy_environment(environment, session) - - self.clients("murano").sessions.deploy.assert_called_once_with( - environment.id, session.id - ) - - config = CONF.openstack - self.mock_wait_for_status.mock.assert_called_once_with( - environment, - update_resource=self.mock_get_from_manager.mock.return_value, - ready_statuses=["READY"], - check_interval=config.murano_deploy_environment_check_interval, - timeout=config.murano_deploy_environment_timeout) - 
self.mock_get_from_manager.mock.assert_called_once_with( - ["DEPLOY FAILURE"]) - self._test_atomic_action_timer(scenario.atomic_actions(), - "murano.deploy_environment") - - @mock.patch(MRN_UTILS + ".open", - side_effect=mock.mock_open(read_data="Key: value"), - create=True) - def test_read_from_file(self, mock_open): - utility = utils.MuranoPackageManager({"uuid": "fake_task_id"}) - data = utility._read_from_file("filename") - expected_data = {"Key": "value"} - self.assertEqual(expected_data, data) - - @mock.patch(MRN_UTILS + ".MuranoPackageManager._read_from_file") - @mock.patch(MRN_UTILS + ".MuranoPackageManager._write_to_file") - def test_change_app_fullname( - self, mock_murano_package_manager__write_to_file, - mock_murano_package_manager__read_from_file): - manifest = {"FullName": "app.name_abc", - "Classes": {"app.name_abc": "app_class.yaml"}} - mock_murano_package_manager__read_from_file.side_effect = ( - [manifest]) - utility = utils.MuranoPackageManager({"uuid": "fake_task_id"}) - utility._change_app_fullname("tmp/tmpfile/") - mock_murano_package_manager__read_from_file.assert_has_calls( - [mock.call("tmp/tmpfile/manifest.yaml")] - ) - mock_murano_package_manager__write_to_file.assert_has_calls( - [mock.call(manifest, "tmp/tmpfile/manifest.yaml")] - ) - - @mock.patch("zipfile.is_zipfile") - @mock.patch("tempfile.mkdtemp") - @mock.patch("shutil.copytree") - @mock.patch(MRN_UTILS + ".MuranoPackageManager._change_app_fullname") - @mock.patch("rally.common.fileutils.pack_dir") - @mock.patch("shutil.rmtree") - def test_prepare_zip_if_not_zip( - self, mock_shutil_rmtree, mock_pack_dir, - mock_murano_package_manager__change_app_fullname, - mock_shutil_copytree, mock_tempfile_mkdtemp, - mock_zipfile_is_zipfile): - utility = utils.MuranoPackageManager({"uuid": "fake_task_id"}) - package_path = "tmp/tmpfile" - - mock_zipfile_is_zipfile.return_value = False - mock_tempfile_mkdtemp.return_value = "tmp/tmpfile" - mock_pack_dir.return_value = "tmp/tmpzipfile" - - zip_file = utility._prepare_package(package_path) - - self.assertEqual("tmp/tmpzipfile", zip_file) - mock_tempfile_mkdtemp.assert_called_once_with() - mock_shutil_copytree.assert_called_once_with( - "tmp/tmpfile", - "tmp/tmpfile/package/" - ) - (mock_murano_package_manager__change_app_fullname. 
- assert_called_once_with("tmp/tmpfile/package/")) - mock_shutil_rmtree.assert_called_once_with("tmp/tmpfile") - - @mock.patch("zipfile.is_zipfile") - def test_prepare_zip_if_zip(self, mock_zipfile_is_zipfile): - utility = utils.MuranoPackageManager({"uuid": "fake_task_id"}) - package_path = "tmp/tmpfile.zip" - mock_zipfile_is_zipfile.return_value = True - zip_file = utility._prepare_package(package_path) - self.assertEqual("tmp/tmpfile.zip", zip_file) - - def test_list_packages(self): - scenario = utils.MuranoScenario() - self.assertEqual(self.clients("murano").packages.list.return_value, - scenario._list_packages()) - self._test_atomic_action_timer(scenario.atomic_actions(), - "murano.list_packages") - - @mock.patch(MRN_UTILS + ".open", create=True) - def test_import_package(self, mock_open): - self.clients("murano").packages.create.return_value = ( - "created_foo_package" - ) - scenario = utils.MuranoScenario() - mock_open.return_value = "opened_foo_package.zip" - imp_package = scenario._import_package("foo_package.zip") - self.assertEqual("created_foo_package", imp_package) - self.clients("murano").packages.create.assert_called_once_with( - {}, {"file": "opened_foo_package.zip"}) - mock_open.assert_called_once_with("foo_package.zip") - self._test_atomic_action_timer(scenario.atomic_actions(), - "murano.import_package") - - def test_delete_package(self): - package = mock.Mock(id="package_id") - scenario = utils.MuranoScenario() - scenario._delete_package(package) - self.clients("murano").packages.delete.assert_called_once_with( - "package_id" - ) - self._test_atomic_action_timer(scenario.atomic_actions(), - "murano.delete_package") - - def test_update_package(self): - package = mock.Mock(id="package_id") - self.clients("murano").packages.update.return_value = "updated_package" - scenario = utils.MuranoScenario() - upd_package = scenario._update_package( - package, {"tags": ["tag"]}, "add" - ) - self.assertEqual("updated_package", upd_package) - self.clients("murano").packages.update.assert_called_once_with( - "package_id", - {"tags": ["tag"]}, - "add" - ) - self._test_atomic_action_timer(scenario.atomic_actions(), - "murano.update_package") - - def test_filter_packages(self): - self.clients("murano").packages.filter.return_value = [] - scenario = utils.MuranoScenario() - return_apps_list = scenario._filter_applications( - {"category": "Web"} - ) - self.assertEqual([], return_apps_list) - self.clients("murano").packages.filter.assert_called_once_with( - category="Web" - ) - self._test_atomic_action_timer(scenario.atomic_actions(), - "murano.filter_applications") diff --git a/tests/unit/plugins/openstack/scenarios/neutron/__init__.py b/tests/unit/plugins/openstack/scenarios/neutron/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/scenarios/neutron/test_bgpvpn.py b/tests/unit/plugins/openstack/scenarios/neutron/test_bgpvpn.py deleted file mode 100644 index 31e7ddb0ad..0000000000 --- a/tests/unit/plugins/openstack/scenarios/neutron/test_bgpvpn.py +++ /dev/null @@ -1,225 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import mock - -from rally.plugins.openstack.scenarios.neutron import bgpvpn -from tests.unit import test - - -@ddt.ddt -class NeutronBgpvpnTestCase(test.TestCase): - - def _get_context(self, resource=None): - context = test.get_test_context() - if resource in ("network", "router"): - context.update({ - "user": { - "id": "fake_user", - "tenant_id": "fake_tenant", - "credential": mock.MagicMock()} - }) - if resource == "network": - context.update( - {"tenant": {"id": "fake_tenant", - resource + "s": [{"id": "fake_net", - "tenant_id": "fake_tenant", - "router_id": "fake_router"}]} - }) - elif resource == "router": - context.update( - {"tenant": {"id": "fake_tenant", - resource + "s": [ - {resource: {"id": "fake_net", - "tenant_id": "fake_tenant"}}]} - }) - return context - - def _get_bgpvpn_create_data(self): - return { - "route_targets": None, - "import_targets": None, - "export_targets": None, - "route_distinguishers": None} - - def _get_bgpvpn_update_data(self): - return { - "route_targets": None, - "import_targets": None, - "export_targets": None, - "route_distinguishers": None} - - @ddt.data( - {}, - {"bgpvpn_create_args": None}, - {"bgpvpn_create_args": {}}, - ) - @ddt.unpack - def test_create_and_delete_bgpvpns(self, bgpvpn_create_args=None): - scenario = bgpvpn.CreateAndDeleteBgpvpns(self._get_context()) - bgpvpn_create_data = bgpvpn_create_args or {} - create_data = self._get_bgpvpn_create_data() - create_data.update(bgpvpn_create_data) - scenario._create_bgpvpn = mock.Mock() - scenario._delete_bgpvpn = mock.Mock() - scenario.run(**create_data) - scenario._create_bgpvpn.assert_called_once_with( - type="l3", **create_data) - scenario._delete_bgpvpn.assert_called_once_with( - scenario._create_bgpvpn.return_value) - - @ddt.data( - {}, - {"bgpvpn_create_args": None}, - {"bgpvpn_create_args": {}}, - ) - @ddt.unpack - def test_create_and_list_bgpvpns(self, bgpvpn_create_args=None): - scenario = bgpvpn.CreateAndListBgpvpns(self._get_context()) - bgpvpn_create_data = bgpvpn_create_args or {} - create_data = self._get_bgpvpn_create_data() - create_data.update(bgpvpn_create_data) - bgpvpn_created = {"bgpvpn": {"id": 1, "name": "b1"}} - bgpvpn_listed = [{"id": 1}] - scenario._create_bgpvpn = mock.Mock(return_value=bgpvpn_created) - scenario._list_bgpvpns = mock.Mock(return_value=bgpvpn_listed) - scenario.run(**create_data) - scenario._create_bgpvpn.assert_called_once_with( - type="l3", **create_data) - scenario._list_bgpvpns.assert_called_once_with() - - @ddt.data( - {}, - {"bgpvpn_create_args": {}}, - {"bgpvpn_update_args": {}}, - {"bgpvpn_update_args": {"update_name": True}}, - {"bgpvpn_update_args": {"update_name": False}}, - ) - @ddt.unpack - def test_create_and_update_bgpvpns(self, bgpvpn_create_args=None, - bgpvpn_update_args=None): - scenario = bgpvpn.CreateAndUpdateBgpvpns(self._get_context()) - bgpvpn_create_data = bgpvpn_create_args or {} - bgpvpn_update_data = bgpvpn_update_args or {} - create_data = self._get_bgpvpn_create_data() - create_data.update(bgpvpn_create_data) - update_data = self._get_bgpvpn_update_data() - update_data.update(bgpvpn_update_data) - if "update_name" not in update_data: - update_data["update_name"] = False - bgpvpn_data = {} - bgpvpn_data.update(bgpvpn_create_data) - bgpvpn_data.update(bgpvpn_update_data) - scenario._create_bgpvpn = mock.Mock() - scenario._update_bgpvpn = mock.Mock() - scenario.run(**bgpvpn_data) - 
scenario._create_bgpvpn.assert_called_once_with( - type="l3", **create_data) - scenario._update_bgpvpn.assert_called_once_with( - scenario._create_bgpvpn.return_value, **update_data) - - @mock.patch.object(bgpvpn, "random") - def test_create_and_associate_disassociate_networks(self, mock_random): - scenario = bgpvpn.CreateAndAssociateDissassociateNetworks( - self._get_context("network")) - create_data = self._get_bgpvpn_create_data() - networks = self._get_context("network")["tenant"]["networks"] - create_data["tenant_id"] = networks[0]["tenant_id"] - mock_random.randint.return_value = 12345 - create_data["route_targets"] = "12345:12345" - scenario._create_bgpvpn = mock.Mock() - scenario._create_bgpvpn_network_assoc = mock.Mock() - scenario._delete_bgpvpn_network_assoc = mock.Mock() - scenario.run() - scenario._create_bgpvpn.assert_called_once_with( - type="l3", **create_data) - - scenario._create_bgpvpn_network_assoc.assert_called_once_with( - scenario._create_bgpvpn.return_value, networks[0]) - scenario._delete_bgpvpn_network_assoc.assert_called_once_with( - scenario._create_bgpvpn.return_value, - scenario._create_bgpvpn_network_assoc.return_value) - - @mock.patch.object(bgpvpn, "random") - def test_create_and_associate_disassociate_routers(self, mock_random): - scenario = bgpvpn.CreateAndAssociateDissassociateRouters( - self._get_context("network")) - create_data = self._get_bgpvpn_create_data() - router = {"id": self._get_context( - "network")["tenant"]["networks"][0]["router_id"]} - create_data["tenant_id"] = self._get_context("network")["tenant"]["id"] - mock_random.randint.return_value = 12345 - create_data["route_targets"] = "12345:12345" - scenario._create_bgpvpn = mock.Mock() - scenario._create_bgpvpn_router_assoc = mock.Mock() - scenario._delete_bgpvpn_router_assoc = mock.Mock() - scenario.run() - - scenario._create_bgpvpn.assert_called_once_with( - type="l3", **create_data) - scenario._create_bgpvpn_router_assoc.assert_called_once_with( - scenario._create_bgpvpn.return_value, router) - scenario._delete_bgpvpn_router_assoc.assert_called_once_with( - scenario._create_bgpvpn.return_value, - scenario._create_bgpvpn_router_assoc.return_value) - - @mock.patch.object(bgpvpn, "random") - def test_create_and_list_networks_assocs(self, mock_random): - scenario = bgpvpn.CreateAndListNetworksAssocs( - self._get_context("network")) - create_data = self._get_bgpvpn_create_data() - networks = self._get_context("network")["tenant"]["networks"] - create_data["tenant_id"] = networks[0]["tenant_id"] - network_assocs = { - "network_associations": [{"network_id": networks[0]["id"]}] - } - mock_random.randint.return_value = 12345 - create_data["route_targets"] = "12345:12345" - scenario._create_bgpvpn = mock.Mock() - scenario._create_bgpvpn_network_assoc = mock.Mock() - scenario._list_bgpvpn_network_assocs = mock.Mock( - return_value=network_assocs) - scenario.run() - - scenario._create_bgpvpn.assert_called_once_with( - type="l3", **create_data) - scenario._create_bgpvpn_network_assoc.assert_called_once_with( - scenario._create_bgpvpn.return_value, networks[0]) - scenario._list_bgpvpn_network_assocs.assert_called_once_with( - scenario._create_bgpvpn.return_value) - - @mock.patch.object(bgpvpn, "random") - def test_create_and_list_routers_assocs(self, mock_random): - scenario = bgpvpn.CreateAndListRoutersAssocs( - self._get_context("network")) - create_data = self._get_bgpvpn_create_data() - router = {"id": self._get_context( - "network")["tenant"]["networks"][0]["router_id"]} - 
create_data["tenant_id"] = self._get_context("network")["tenant"]["id"] - router_assocs = { - "router_associations": [{"router_id": router["id"]}] - } - mock_random.randint.return_value = 12345 - create_data["route_targets"] = "12345:12345" - scenario._create_bgpvpn = mock.Mock() - scenario._create_bgpvpn_router_assoc = mock.Mock() - scenario._list_bgpvpn_router_assocs = mock.Mock( - return_value=router_assocs) - scenario.run() - - scenario._create_bgpvpn.assert_called_once_with( - type="l3", **create_data) - scenario._create_bgpvpn_router_assoc.assert_called_once_with( - scenario._create_bgpvpn.return_value, router) - scenario._list_bgpvpn_router_assocs.assert_called_once_with( - scenario._create_bgpvpn.return_value) diff --git a/tests/unit/plugins/openstack/scenarios/neutron/test_loadbalancer_v1.py b/tests/unit/plugins/openstack/scenarios/neutron/test_loadbalancer_v1.py deleted file mode 100644 index d15819050d..0000000000 --- a/tests/unit/plugins/openstack/scenarios/neutron/test_loadbalancer_v1.py +++ /dev/null @@ -1,303 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import mock - -from rally.plugins.openstack.scenarios.neutron import loadbalancer_v1 -from tests.unit import test - - -@ddt.ddt -class NeutronLoadbalancerv1TestCase(test.TestCase): - - def _get_context(self): - context = test.get_test_context() - context.update({ - "user": { - "id": "fake_user", - "tenant_id": "fake_tenant", - "credential": mock.MagicMock() - }, - "tenant": {"id": "fake_tenant", - "networks": [{"id": "fake_net", - "subnets": ["fake_subnet"]}]}}) - return context - - @ddt.data( - {}, - {"pool_create_args": None}, - {"pool_create_args": {}}, - {"pool_create_args": {"name": "given-name"}}, - ) - @ddt.unpack - def test_create_and_list_pools(self, pool_create_args=None): - scenario = loadbalancer_v1.CreateAndListPools(self._get_context()) - pool_data = pool_create_args or {} - networks = self._get_context()["tenant"]["networks"] - scenario._create_v1_pools = mock.Mock() - scenario._list_v1_pools = mock.Mock() - scenario.run(pool_create_args=pool_create_args) - scenario._create_v1_pools.assert_called_once_with(networks, - **pool_data) - scenario._list_v1_pools.assert_called_once_with() - - @ddt.data( - {}, - {"pool_create_args": None}, - {"pool_create_args": {}}, - {"pool_create_args": {"name": "given-name"}}, - ) - @ddt.unpack - def test_create_and_delete_pools(self, pool_create_args=None): - scenario = loadbalancer_v1.CreateAndDeletePools(self._get_context()) - pools = [{ - "pool": { - "id": "pool-id" - } - }] - pool_data = pool_create_args or {} - networks = self._get_context()["tenant"]["networks"] - scenario._create_v1_pools = mock.Mock(return_value=pools) - scenario._delete_v1_pool = mock.Mock() - scenario.run(pool_create_args=pool_create_args) - self.assertEqual([mock.call(networks, **pool_data)], - scenario._create_v1_pools.mock_calls) - for _ in pools: - self.assertEqual(1, scenario._delete_v1_pool.call_count) - - @ddt.data( - {}, - {"pool_create_args": None}, - {"pool_create_args": {}}, 
- {"pool_create_args": {"name": "given-name"}}, - {"pool_update_args": None}, - {"pool_update_args": {}}, - {"pool_update_args": {"name": "updated-name"}}, - {"pool_create_args": None, "pool_update_args": None}, - {"pool_create_args": {"name": "given-name"}, - "pool_update_args": {"name": "updated-name"}}, - {"pool_create_args": None, - "pool_update_args": {"name": "updated-name"}}, - {"pool_create_args": None, "pool_update_args": {}}, - {"pool_create_args": {}, "pool_update_args": None}, - ) - @ddt.unpack - def test_create_and_update_pools(self, pool_create_args=None, - pool_update_args=None): - scenario = loadbalancer_v1.CreateAndUpdatePools(self._get_context()) - pools = [{ - "pool": { - "id": "pool-id" - } - }] - updated_pool = { - "pool": { - "id": "pool-id", - "name": "updated-pool", - "admin_state_up": True - } - } - pool_data = pool_create_args or {} - pool_update_args = pool_update_args or {} - pool_update_args.update({"name": "_updated", "admin_state_up": True}) - scenario._create_v1_pools = mock.Mock(return_value=pools) - scenario._update_v1_pool = mock.Mock(return_value=updated_pool) - networks = self._get_context()["tenant"]["networks"] - scenario.run(pool_create_args=pool_data, - pool_update_args=pool_update_args) - self.assertEqual([mock.call(networks, **pool_data)], - scenario._create_v1_pools.mock_calls) - for pool in pools: - scenario._update_v1_pool.assert_called_once_with( - pool, **pool_update_args) - - @ddt.data( - {}, - {"vip_create_args": None}, - {"vip_create_args": {}}, - {"vip_create_args": {"name": "given-vip-name"}}, - {"pool_create_args": None}, - {"pool_create_args": {}}, - {"pool_create_args": {"name": "given-pool-name"}}, - ) - @ddt.unpack - def test_create_and_list_vips(self, pool_create_args=None, - vip_create_args=None): - scenario = loadbalancer_v1.CreateAndListVips(self._get_context()) - pools = [{ - "pool": { - "id": "pool-id" - } - }] - vip_data = vip_create_args or {} - pool_data = pool_create_args or {} - networks = self._get_context()["tenant"]["networks"] - scenario._create_v1_pools = mock.Mock(return_value=pools) - scenario._create_v1_vip = mock.Mock() - scenario._list_v1_vips = mock.Mock() - scenario.run(pool_create_args=pool_create_args, - vip_create_args=vip_create_args) - scenario._create_v1_pools.assert_called_once_with(networks, - **pool_data) - scenario._create_v1_vip.assert_has_calls( - [mock.call(pool, **vip_data) for pool in pools]) - scenario._list_v1_vips.assert_called_once_with() - - @ddt.data( - {}, - {"vip_create_args": None}, - {"vip_create_args": {}}, - {"vip_create_args": {"name": "given-name"}}, - {"pool_create_args": None}, - {"pool_create_args": {}}, - {"pool_create_args": {"name": "given-pool-name"}}, - ) - @ddt.unpack - def test_create_and_delete_vips(self, pool_create_args=None, - vip_create_args=None): - scenario = loadbalancer_v1.CreateAndDeleteVips(self._get_context()) - pools = [{ - "pool": { - "id": "pool-id" - } - }] - vip = { - "vip": { - "id": "vip-id" - } - } - vip_data = vip_create_args or {} - pool_data = pool_create_args or {} - networks = self._get_context()["tenant"]["networks"] - scenario._create_v1_pools = mock.Mock(return_value=pools) - scenario._create_v1_vip = mock.Mock(return_value=vip) - scenario._delete_v1_vip = mock.Mock() - scenario.run(pool_create_args=pool_create_args, - vip_create_args=vip_create_args) - scenario._create_v1_pools.assert_called_once_with(networks, - **pool_data) - scenario._create_v1_vip.assert_has_calls( - [mock.call(pool, **vip_data) for pool in pools]) - 
scenario._delete_v1_vip.assert_has_calls([mock.call(vip["vip"])]) - - @ddt.data( - {}, - {"vip_create_args": None}, - {"vip_create_args": {}}, - {"vip_create_args": {"name": "given-vip-name"}}, - {"pool_create_args": None}, - {"pool_create_args": {}}, - {"pool_create_args": {"name": "given-pool-name"}}, - ) - @ddt.unpack - def test_create_and_update_vips(self, pool_create_args=None, - vip_create_args=None, - vip_update_args=None): - scenario = loadbalancer_v1.CreateAndUpdateVips(self._get_context()) - pools = [{ - "pool": { - "id": "pool-id", - } - }] - expected_vip = { - "vip": { - "id": "vip-id", - "name": "vip-name" - } - } - updated_vip = { - "vip": { - "id": "vip-id", - "name": "updated-vip-name" - } - } - vips = [expected_vip] - vip_data = vip_create_args or {} - vip_update_data = vip_update_args or {} - pool_data = pool_create_args or {} - networks = self._get_context()["tenant"]["networks"] - scenario._create_v1_pools = mock.Mock(return_value=pools) - scenario._create_v1_vip = mock.Mock(return_value=expected_vip) - scenario._update_v1_vip = mock.Mock(return_value=updated_vip) - scenario.run(pool_create_args=pool_create_args, - vip_create_args=vip_create_args, - vip_update_args=vip_update_args) - scenario._create_v1_pools.assert_called_once_with(networks, - **pool_data) - scenario._create_v1_vip.assert_has_calls( - [mock.call(pool, **vip_data) for pool in pools]) - scenario._update_v1_vip.assert_has_calls( - [mock.call(vip, **vip_update_data) for vip in vips]) - - @ddt.data( - {}, - {"healthmonitor_create_args": None}, - {"healthmonitor_create_args": {}}, - {"healthmonitor_create_args": {"name": "given-name"}}, - ) - @ddt.unpack - def test_create_and_list_healthmonitors(self, - healthmonitor_create_args=None): - scenario = loadbalancer_v1.CreateAndListHealthmonitors( - self._get_context()) - hm_data = healthmonitor_create_args or {} - scenario._create_v1_healthmonitor = mock.Mock() - scenario._list_v1_healthmonitors = mock.Mock() - scenario.run(healthmonitor_create_args=healthmonitor_create_args) - scenario._create_v1_healthmonitor.assert_called_once_with(**hm_data) - scenario._list_v1_healthmonitors.assert_called_once_with() - - @ddt.data( - {}, - {"healthmonitor_create_args": None}, - {"healthmonitor_create_args": {}}, - {"healthmonitor_create_args": {"name": "given-name"}}, - ) - @ddt.unpack - def test_create_and_delete_healthmonitors(self, - healthmonitor_create_args=None): - scenario = loadbalancer_v1.CreateAndDeleteHealthmonitors( - self._get_context()) - hm = {"health_monitor": {"id": "hm-id"}} - hm_data = healthmonitor_create_args or {} - scenario._create_v1_healthmonitor = mock.Mock(return_value=hm) - scenario._delete_v1_healthmonitor = mock.Mock() - scenario.run(healthmonitor_create_args=healthmonitor_create_args) - scenario._create_v1_healthmonitor.assert_called_once_with(**hm_data) - scenario._delete_v1_healthmonitor.assert_called_once_with( - scenario._create_v1_healthmonitor.return_value["health_monitor"]) - - @ddt.data( - {}, - {"healthmonitor_create_args": None}, - {"healthmonitor_create_args": {}}, - {"healthmonitor_create_args": {"name": "given-name"}}, - ) - @ddt.unpack - def test_create_and_update_healthmonitors(self, - healthmonitor_create_args=None, - healthmonitor_update_args=None): - scenario = loadbalancer_v1.CreateAndUpdateHealthmonitors( - self._get_context()) - mock_random = loadbalancer_v1.random = mock.Mock() - hm = {"healthmonitor": {"id": "hm-id"}} - hm_data = healthmonitor_create_args or {} - hm_update_data = healthmonitor_update_args or { - 
"max_retries": mock_random.choice.return_value} - scenario._create_v1_healthmonitor = mock.Mock(return_value=hm) - scenario._update_v1_healthmonitor = mock.Mock() - scenario.run(healthmonitor_create_args=healthmonitor_create_args, - healthmonitor_update_args=healthmonitor_update_args) - scenario._create_v1_healthmonitor.assert_called_once_with(**hm_data) - scenario._update_v1_healthmonitor.assert_called_once_with( - scenario._create_v1_healthmonitor.return_value, **hm_update_data) diff --git a/tests/unit/plugins/openstack/scenarios/neutron/test_loadbalancer_v2.py b/tests/unit/plugins/openstack/scenarios/neutron/test_loadbalancer_v2.py deleted file mode 100755 index 565f85951a..0000000000 --- a/tests/unit/plugins/openstack/scenarios/neutron/test_loadbalancer_v2.py +++ /dev/null @@ -1,59 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import mock - -from rally.plugins.openstack.scenarios.neutron import loadbalancer_v2 -from tests.unit import test - - -@ddt.ddt -class NeutronLoadbalancerv2TestCase(test.TestCase): - - def _get_context(self): - context = test.get_test_context() - context.update({ - "user": { - "id": "fake_user", - "tenant_id": "fake_tenant", - "credential": mock.MagicMock() - }, - "tenant": {"id": "fake_tenant", - "networks": [{"id": "fake_net", - "subnets": ["fake_subnet"]}]}}) - return context - - @ddt.data( - {}, - {"lb_create_args": None}, - {"lb_create_args": {}}, - {"lb_create_args": {"name": "given-name"}}, - ) - @ddt.unpack - def test_create_and_list_load_balancers(self, lb_create_args=None): - context = self._get_context() - scenario = loadbalancer_v2.CreateAndListLoadbalancers(context) - lb_create_args = lb_create_args or {} - networks = context["tenant"]["networks"] - scenario._create_lbaasv2_loadbalancer = mock.Mock() - scenario._list_lbaasv2_loadbalancers = mock.Mock() - scenario.run(lb_create_args=lb_create_args) - - subnets = [] - mock_has_calls = [] - for network in networks: - subnets.extend(network.get("subnets", [])) - for subnet in subnets: - mock_has_calls.append(mock.call(subnet, **lb_create_args)) - scenario._create_lbaasv2_loadbalancer.assert_has_calls(mock_has_calls) - scenario._list_lbaasv2_loadbalancers.assert_called_once_with() diff --git a/tests/unit/plugins/openstack/scenarios/neutron/test_network.py b/tests/unit/plugins/openstack/scenarios/neutron/test_network.py deleted file mode 100644 index 48f008cc25..0000000000 --- a/tests/unit/plugins/openstack/scenarios/neutron/test_network.py +++ /dev/null @@ -1,598 +0,0 @@ -# Copyright 2014: Intel Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import mock - -from rally import exceptions as rally_exceptions -from rally.plugins.openstack.scenarios.neutron import network -from tests.unit import test - -BASE = "rally.plugins.openstack.scenarios.neutron.network" - - -@ddt.ddt -class NeutronNetworksTestCase(test.ScenarioTestCase): - - @ddt.data( - {"network_create_args": {}}, - {"network_create_args": {"name": "given-name"}}, - {"network_create_args": {"provider:network_type": "vxlan"}} - ) - @ddt.unpack - @mock.patch("%s.CreateAndListNetworks._list_networks" % BASE) - @mock.patch("%s.CreateAndListNetworks._create_network" % BASE) - def test_create_and_list_networks(self, - mock__create_network, - mock__list_networks, - network_create_args): - scenario = network.CreateAndListNetworks(self.context) - - scenario.run(network_create_args=network_create_args) - mock__create_network.assert_called_once_with(network_create_args) - mock__list_networks.assert_called_once_with() - - mock__create_network.reset_mock() - mock__list_networks.reset_mock() - - @ddt.data( - {"network_create_args": {}}, - {"network_create_args": {"name": "given-name"}}, - ) - @ddt.unpack - @mock.patch("%s.CreateAndShowNetwork._show_network" % BASE) - @mock.patch("%s.CreateAndShowNetwork._create_network" % BASE) - def test_create_and_show_network(self, - mock__create_network, - mock__show_network, - network_create_args): - scenario = network.CreateAndShowNetwork(self.context) - mock_net = mock.Mock() - - mock__create_network.return_value = mock_net - scenario.run(network_create_args=network_create_args) - - mock__create_network.assert_called_once_with(network_create_args) - mock__show_network.assert_called_once_with(mock_net) - - mock__create_network.reset_mock() - mock__show_network.reset_mock() - - @mock.patch("%s.CreateAndUpdateNetworks._update_network" % BASE) - @mock.patch("%s.CreateAndUpdateNetworks._create_network" % BASE, - return_value={ - "network": { - "id": "network-id", - "name": "network-name", - "admin_state_up": False - } - }) - def test_create_and_update_networks(self, - mock__create_network, - mock__update_network): - scenario = network.CreateAndUpdateNetworks(self.context) - - network_update_args = {"name": "_updated", "admin_state_up": True} - - # Default options - scenario.run(network_update_args=network_update_args) - - mock__create_network.assert_called_once_with({}) - - mock__update_network.assert_has_calls( - [mock.call( - mock__create_network.return_value, network_update_args - )]) - - mock__create_network.reset_mock() - mock__update_network.reset_mock() - - # Explicit network name is specified - network_create_args = { - "name": "network-name", - "admin_state_up": False - } - - scenario.run(network_create_args=network_create_args, - network_update_args=network_update_args) - mock__create_network.assert_called_once_with(network_create_args) - mock__update_network.assert_has_calls( - [mock.call(mock__create_network.return_value, - network_update_args)]) - - @mock.patch("%s.CreateAndDeleteNetworks._delete_network" % BASE) - @mock.patch("%s.CreateAndDeleteNetworks._create_network" % BASE) - def test_create_and_delete_networks(self, - mock__create_network, - mock__delete_network): - scenario = network.CreateAndDeleteNetworks(self.context) - - # Default options - network_create_args = {} - scenario.run() - mock__create_network.assert_called_once_with(network_create_args) - self.assertTrue(mock__delete_network.call_count) - - 
mock__create_network.reset_mock() - mock__delete_network.reset_mock() - - # Explicit network name is specified - network_create_args = {"name": "given-name"} - scenario.run(network_create_args=network_create_args) - mock__create_network.assert_called_once_with(network_create_args) - self.assertTrue(mock__delete_network.call_count) - - def test_create_and_list_subnets(self): - network_create_args = {"router:external": True} - subnet_create_args = {"allocation_pools": []} - subnet_cidr_start = "default_cidr" - subnets_per_network = 5 - net = mock.MagicMock() - - scenario = network.CreateAndListSubnets(self.context) - scenario._create_network = mock.Mock(return_value=net) - scenario._create_subnets = mock.Mock() - scenario._list_subnets = mock.Mock() - - scenario.run(network_create_args=network_create_args, - subnet_create_args=subnet_create_args, - subnet_cidr_start=subnet_cidr_start, - subnets_per_network=subnets_per_network) - - scenario._create_network.assert_called_once_with( - network_create_args) - scenario._create_subnets.assert_called_once_with( - net, subnet_create_args, subnet_cidr_start, subnets_per_network) - - scenario._list_subnets.assert_called_once_with() - - def test_create_and_show_subnets(self): - network_create_args = {"router:external": True} - subnet_create_args = {"allocation_pools": []} - subnet_cidr_start = "1.1.0.0/30" - subnets_per_network = 5 - net = mock.MagicMock() - - scenario = network.CreateAndShowSubnets(self.context) - scenario._get_or_create_network = mock.Mock(return_value=net) - scenario._create_subnets = mock.MagicMock() - scenario._show_subnet = mock.Mock() - - scenario.run(network_create_args=network_create_args, - subnet_create_args=subnet_create_args, - subnet_cidr_start=subnet_cidr_start, - subnets_per_network=subnets_per_network) - - scenario._get_or_create_network.assert_called_once_with( - network_create_args) - scenario._create_subnets.assert_called_once_with( - net, subnet_create_args, subnet_cidr_start, subnets_per_network) - for subnet in scenario._create_subnets.return_value: - scenario._show_subnet.assert_called_with(subnet) - - def test_set_and_clear_router_gateway(self): - network_create_args = {"router:external": True} - router_create_args = {"admin_state_up": True} - enable_snat = True - ext_net = mock.MagicMock() - router = mock.MagicMock() - scenario = network.SetAndClearRouterGateway(self.context) - scenario._create_network = mock.Mock(return_value=ext_net) - scenario._create_router = mock.Mock(return_value=router) - scenario._add_gateway_router = mock.Mock() - scenario._remove_gateway_router = mock.Mock() - - scenario.run(enable_snat, network_create_args, router_create_args) - - scenario._create_network.assert_called_once_with( - network_create_args) - scenario._create_router.assert_called_once_with(router_create_args) - scenario._add_gateway_router.assert_called_once_with(router, ext_net, - enable_snat) - scenario._remove_gateway_router.assert_called_once_with(router) - - def test_create_and_update_subnets(self): - network_create_args = {"router:external": True} - subnet_create_args = {"allocation_pools": []} - subnet_update_args = {"enabled_dhcp": True} - subnet_cidr_start = "default_cidr" - subnets_per_network = 5 - net = mock.MagicMock() - subnets = [mock.MagicMock() for _ in range(subnets_per_network)] - - scenario = network.CreateAndUpdateSubnets(self.context) - scenario._create_network = mock.Mock(return_value=net) - scenario._create_subnets = mock.Mock(return_value=subnets) - scenario._update_subnet = mock.Mock() - - 
scenario.run(subnet_update_args, - network_create_args=network_create_args, - subnet_create_args=subnet_create_args, - subnet_cidr_start=subnet_cidr_start, - subnets_per_network=subnets_per_network) - - scenario._create_network.assert_called_once_with( - network_create_args) - scenario._create_subnets.assert_called_once_with( - net, subnet_create_args, subnet_cidr_start, subnets_per_network) - scenario._update_subnet.assert_has_calls( - [mock.call(s, subnet_update_args) for s in subnets]) - - def test_create_and_delete_subnets(self): - network_create_args = {"router:external": True} - subnet_create_args = {"allocation_pools": []} - subnet_cidr_start = "default_cidr" - subnets_per_network = 5 - net = mock.MagicMock() - subnets = [mock.MagicMock() for _ in range(subnets_per_network)] - - scenario = network.CreateAndDeleteSubnets(self.context) - scenario._get_or_create_network = mock.Mock(return_value=net) - scenario._create_subnets = mock.Mock(return_value=subnets) - scenario._delete_subnet = mock.Mock() - - scenario.run(network_create_args=network_create_args, - subnet_create_args=subnet_create_args, - subnet_cidr_start=subnet_cidr_start, - subnets_per_network=subnets_per_network) - - scenario._get_or_create_network.assert_called_once_with( - network_create_args) - scenario._create_subnets.assert_called_once_with( - net, subnet_create_args, subnet_cidr_start, subnets_per_network) - scenario._delete_subnet.assert_has_calls( - [mock.call(s) for s in subnets]) - - def test_create_and_list_routers(self): - network_create_args = {"router:external": True} - subnet_create_args = {"allocation_pools": []} - subnet_cidr_start = "default_cidr" - subnets_per_network = 5 - router_create_args = {"admin_state_up": True} - - scenario = network.CreateAndListRouters(self.context) - scenario._create_network_structure = mock.Mock() - scenario._list_routers = mock.Mock() - - scenario.run(network_create_args=network_create_args, - subnet_create_args=subnet_create_args, - subnet_cidr_start=subnet_cidr_start, - subnets_per_network=subnets_per_network, - router_create_args=router_create_args) - - scenario._create_network_structure.assert_called_once_with( - network_create_args, subnet_create_args, subnet_cidr_start, - subnets_per_network, router_create_args) - scenario._list_routers.assert_called_once_with() - - def test_list_agents(self): - agent_args = { - "F": "id", - "sort-dir": "asc" - } - scenario = network.ListAgents(self.context) - scenario._list_agents = mock.Mock() - - scenario.run(agent_args=agent_args) - scenario._list_agents.assert_called_once_with(**agent_args) - - def test_create_and_update_routers(self): - router_update_args = {"admin_state_up": False} - network_create_args = {"router:external": True} - subnet_create_args = {"allocation_pools": []} - subnet_cidr_start = "default_cidr" - subnets_per_network = 5 - router_create_args = {"admin_state_up": True} - net = mock.MagicMock() - subnets = [mock.MagicMock() for i in range(subnets_per_network)] - routers = [mock.MagicMock() for i in range(subnets_per_network)] - - scenario = network.CreateAndUpdateRouters(self.context) - scenario._create_network_structure = mock.Mock( - return_value=(net, subnets, routers)) - scenario._update_router = mock.Mock() - - scenario.run(router_update_args, - network_create_args=network_create_args, - subnet_create_args=subnet_create_args, - subnet_cidr_start=subnet_cidr_start, - subnets_per_network=subnets_per_network, - router_create_args=router_create_args) - - 
scenario._create_network_structure.assert_called_once_with( - network_create_args, subnet_create_args, subnet_cidr_start, - subnets_per_network, router_create_args) - - update_calls = [mock.call(router, router_update_args) - for router in routers] - scenario._update_router.assert_has_calls(update_calls) - - def test_create_and_delete_routers(self): - network_create_args = {"router:external": True} - subnet_create_args = {"allocation_pools": []} - subnet_cidr_start = "default_cidr" - subnets_per_network = 5 - router_create_args = {"admin_state_up": True} - net = mock.MagicMock() - subnets = [mock.MagicMock() for i in range(subnets_per_network)] - routers = [mock.MagicMock() for i in range(subnets_per_network)] - - scenario = network.CreateAndDeleteRouters(self.context) - scenario._create_network_structure = mock.Mock( - return_value=(net, subnets, routers)) - scenario._remove_interface_router = mock.Mock() - scenario._delete_router = mock.Mock() - - scenario.run(network_create_args=network_create_args, - subnet_create_args=subnet_create_args, - subnet_cidr_start=subnet_cidr_start, - subnets_per_network=subnets_per_network, - router_create_args=router_create_args) - - scenario._create_network_structure.assert_called_once_with( - network_create_args, subnet_create_args, subnet_cidr_start, - subnets_per_network, router_create_args) - - scenario._remove_interface_router.assert_has_calls([ - mock.call(subnets[i]["subnet"], routers[i]["router"]) - for i in range(subnets_per_network)]) - scenario._delete_router.assert_has_calls( - [mock.call(router) for router in routers]) - - def test_create_and_show_routers(self): - network_create_args = {"router:external": True} - subnet_create_args = {"allocation_pools": []} - subnet_cidr_start = "default_cidr" - subnets_per_network = 5 - router_create_args = {"admin_state_up": True} - net = mock.MagicMock() - subnets = [mock.MagicMock() for i in range(subnets_per_network)] - routers = [mock.MagicMock() for i in range(subnets_per_network)] - - scenario = network.CreateAndShowRouters(self.context) - scenario._create_network_structure = mock.Mock( - return_value=(net, subnets, routers)) - scenario._show_router = mock.Mock() - - scenario.run(network_create_args=network_create_args, - subnet_create_args=subnet_create_args, - subnet_cidr_start=subnet_cidr_start, - subnets_per_network=subnets_per_network, - router_create_args=router_create_args) - - scenario._create_network_structure.assert_called_once_with( - network_create_args, subnet_create_args, subnet_cidr_start, - subnets_per_network, router_create_args) - - scenario._show_router.assert_has_calls( - [mock.call(router) for router in routers]) - - def test_create_and_list_ports(self): - port_create_args = {"allocation_pools": []} - ports_per_network = 10 - network_create_args = {"router:external": True} - net = mock.MagicMock() - - scenario = network.CreateAndListPorts(self.context) - scenario._get_or_create_network = mock.Mock(return_value=net) - scenario._create_port = mock.MagicMock() - scenario._list_ports = mock.Mock() - - scenario.run(network_create_args=network_create_args, - port_create_args=port_create_args, - ports_per_network=ports_per_network) - scenario._get_or_create_network.assert_called_once_with( - network_create_args) - scenario._create_port.assert_has_calls( - [mock.call(net, port_create_args) - for _ in range(ports_per_network)]) - - scenario._list_ports.assert_called_once_with() - - def test_create_and_update_ports(self): - port_update_args = {"admin_state_up": False}, - port_create_args = 
{"allocation_pools": []} - ports_per_network = 10 - network_create_args = {"router:external": True} - net = mock.MagicMock() - ports = [mock.MagicMock() for _ in range(ports_per_network)] - - scenario = network.CreateAndUpdatePorts(self.context) - scenario._get_or_create_network = mock.Mock(return_value=net) - scenario._create_port = mock.Mock(side_effect=ports) - scenario._update_port = mock.Mock() - - scenario.run(port_update_args, - network_create_args=network_create_args, - port_create_args=port_create_args, - ports_per_network=ports_per_network) - scenario._get_or_create_network.assert_called_once_with( - network_create_args) - scenario._create_port.assert_has_calls( - [mock.call(net, port_create_args) - for _ in range(ports_per_network)]) - scenario._update_port.assert_has_calls( - [mock.call(p, port_update_args) for p in ports]) - - def test_create_and_show_ports_positive(self): - port_create_args = {"allocation_pools": []} - ports_per_network = 1 - network_create_args = {"router:external": True} - net = mock.MagicMock() - - scenario = network.CreateAndShowPorts(self.context) - scenario._get_or_create_network = mock.MagicMock(return_value=net) - scenario._create_port = mock.MagicMock() - scenario._show_port = mock.MagicMock() - port = {"port": {"id": 1, "name": "f"}} - port_info = {"port": {"id": 1, "name": "f", "status": "ACTIVE"}} - scenario._show_port.return_value = port_info - - # Positive case: - scenario._create_port.return_value = port - scenario.run(network_create_args=network_create_args, - port_create_args=port_create_args, - ports_per_network=ports_per_network) - scenario._get_or_create_network.assert_called_once_with( - network_create_args) - scenario._create_port.assert_called_with(net, port_create_args) - scenario._show_port.assert_called_with(port) - - def test_create_and_show_ports_negative(self): - port_create_args = {"allocation_pools": []} - ports_per_network = 1 - network_create_args = {"router:external": True} - net = mock.MagicMock() - - scenario = network.CreateAndShowPorts(self.context) - scenario._get_or_create_network = mock.MagicMock(return_value=net) - scenario._create_port = mock.MagicMock() - scenario._show_port = mock.MagicMock() - - # Negative case1: port isn't created - scenario._create_port.return_value = None - self.assertRaises(rally_exceptions.RallyAssertionError, - scenario.run, - network_create_args, - port_create_args, - ports_per_network) - scenario._get_or_create_network.assert_called_once_with( - network_create_args) - scenario._create_port.assert_called_once_with(net, port_create_args) - - # Negative case2: port isn't show - port = {"port": {"id": 1, "name": "f1"}} - port_info = {"port": {"id": 2, "name": "f2", "status": "ACTIVE"}} - scenario._show_port.return_value = port_info - scenario._create_port.return_value = port - - self.assertRaises(rally_exceptions.RallyAssertionError, - scenario.run, - network_create_args, - port_create_args, - ports_per_network) - - scenario._get_or_create_network.assert_called_with( - network_create_args) - scenario._create_port.assert_called_with(net, port_create_args) - scenario._show_port.assert_called_with(port) - - def test_create_and_delete_ports(self): - port_create_args = {"allocation_pools": []} - ports_per_network = 10 - network_create_args = {"router:external": True} - net = mock.MagicMock() - ports = [mock.MagicMock() for _ in range(ports_per_network)] - - scenario = network.CreateAndDeletePorts(self.context) - scenario._get_or_create_network = mock.Mock(return_value=net) - scenario._create_port = 
mock.Mock(side_effect=ports) - scenario._delete_port = mock.Mock() - - scenario.run(network_create_args=network_create_args, - port_create_args=port_create_args, - ports_per_network=ports_per_network) - scenario._get_or_create_network.assert_called_once_with( - network_create_args) - scenario._create_port.assert_has_calls( - [mock.call(net, port_create_args) - for _ in range(ports_per_network)]) - scenario._delete_port.assert_has_calls( - [mock.call(p) for p in ports]) - - @ddt.data( - {"floating_network": "ext-net"}, - {"floating_network": "ext-net", - "floating_ip_args": {"floating_ip_address": "1.1.1.1"}}, - ) - @ddt.unpack - def test_create_and_list_floating_ips(self, floating_network=None, - floating_ip_args=None): - scenario = network.CreateAndListFloatingIps(self.context) - floating_ip_args = floating_ip_args or {} - scenario._create_floatingip = mock.Mock() - scenario._list_floating_ips = mock.Mock() - scenario.run(floating_network=floating_network, - floating_ip_args=floating_ip_args) - scenario._create_floatingip.assert_called_once_with( - floating_network, **floating_ip_args) - scenario._list_floating_ips.assert_called_once_with() - - @ddt.data( - {"floating_network": "ext-net"}, - {"floating_network": "ext-net", - "floating_ip_args": {"floating_ip_address": "1.1.1.1"}}, - ) - @ddt.unpack - def test_create_and_delete_floating_ips(self, floating_network=None, - floating_ip_args=None): - scenario = network.CreateAndDeleteFloatingIps(self.context) - floating_ip_args = floating_ip_args or {} - fip = {"floatingip": {"id": "floating-ip-id"}} - scenario._create_floatingip = mock.Mock(return_value=fip) - scenario._delete_floating_ip = mock.Mock() - scenario.run(floating_network=floating_network, - floating_ip_args=floating_ip_args) - scenario._create_floatingip.assert_called_once_with( - floating_network, **floating_ip_args) - scenario._delete_floating_ip.assert_called_once_with( - scenario._create_floatingip.return_value["floatingip"]) - - @mock.patch("%s.DeleteSubnets._delete_subnet" % BASE) - def test_delete_subnets(self, mock__delete_subnet): - # do not guess what user will be used - self.context["user_choice_method"] = "round_robin" - # if it is the 4th iteration, the second user from the second tenant - # should be taken, which means that the second subnets from each - # tenant network should be removed. - self.context["iteration"] = 4 - # in case of `round_robin` the user will be selected from the list of - # available users of particular tenant, not from the list of all - # tenants (i.e random choice). BUT to trigger selecting user and - # tenant `users` key should present in context dict - self.context["users"] = [] - - self.context["tenants"] = { - # this should not be used - "uuid-1": { - "id": "uuid-1", - "networks": [{"subnets": ["subnet-1"]}], - "users": [{"id": "user-1", "credential": mock.MagicMock()}, - {"id": "user-2", "credential": mock.MagicMock()}] - }, - # this is expected user - "uuid-2": { - "id": "uuid-2", - "networks": [ - {"subnets": ["subnet-2", "subnet-3"]}, - {"subnets": ["subnet-4", "subnet-5"]}], - "users": [{"id": "user-3", "credential": mock.MagicMock()}, - {"id": "user-4", "credential": mock.MagicMock()}] - } - } - - scenario = network.DeleteSubnets(self.context) - self.assertEqual("user-4", scenario.context["user"]["id"], - "Unexpected user is taken. 
The wrong subnets can be " - "affected(removed).") - - scenario.run() - - self.assertEqual( - [ - mock.call({"subnet": {"id": "subnet-3"}}), - mock.call({"subnet": {"id": "subnet-5"}}) - ], - mock__delete_subnet.call_args_list) diff --git a/tests/unit/plugins/openstack/scenarios/neutron/test_security_groups.py b/tests/unit/plugins/openstack/scenarios/neutron/test_security_groups.py deleted file mode 100644 index 5d3054165b..0000000000 --- a/tests/unit/plugins/openstack/scenarios/neutron/test_security_groups.py +++ /dev/null @@ -1,345 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import mock - -from rally import exceptions as rally_exceptions -from rally.plugins.openstack.scenarios.neutron import security_groups -from tests.unit import test - - -@ddt.ddt -class NeutronSecurityGroup(test.TestCase): - - @ddt.data( - {}, - {"security_group_create_args": {}}, - {"security_group_create_args": {"description": "fake-description"}}, - ) - @ddt.unpack - def test_create_and_list_security_groups( - self, security_group_create_args=None): - scenario = security_groups.CreateAndListSecurityGroups() - - security_group_data = security_group_create_args or {} - scenario._create_security_group = mock.Mock() - scenario._list_security_groups = mock.Mock() - scenario.run(security_group_create_args=security_group_create_args) - scenario._create_security_group.assert_called_once_with( - **security_group_data) - scenario._list_security_groups.assert_called_once_with() - - @ddt.data( - {}, - {"security_group_create_args": {}}, - {"security_group_create_args": {"description": "fake-description"}}, - ) - @ddt.unpack - def test_create_and_show_security_group( - self, security_group_create_args=None): - scenario = security_groups.CreateAndShowSecurityGroup() - security_group = mock.Mock() - security_group_data = security_group_create_args or {} - scenario._create_security_group = mock.Mock() - scenario._show_security_group = mock.Mock() - - # Positive case - scenario._create_security_group.return_value = security_group - scenario.run(security_group_create_args=security_group_create_args) - scenario._create_security_group.assert_called_once_with( - **security_group_data) - scenario._show_security_group.assert_called_once_with( - scenario._create_security_group.return_value) - - @ddt.data( - {}, - {"security_group_create_args": {}}, - {"security_group_create_args": {"description": "fake-description"}}, - ) - @ddt.unpack - def test_create_and_show_security_group_with_none_group( - self, security_group_create_args=None): - scenario = security_groups.CreateAndShowSecurityGroup() - security_group_data = security_group_create_args or {} - scenario._create_security_group = mock.Mock() - scenario._show_security_group = mock.Mock() - - # Negative case: security_group isn't created - scenario._create_security_group.return_value = None - self.assertRaises(rally_exceptions.RallyAssertionError, - scenario.run, security_group_create_args) - scenario._create_security_group.assert_called_with( - **security_group_data) 
- - @ddt.data( - {}, - {"security_group_create_args": {}}, - {"security_group_create_args": {"description": "fake-description"}}, - ) - @ddt.unpack - def test_create_and_delete_security_groups( - self, security_group_create_args=None): - scenario = security_groups.CreateAndDeleteSecurityGroups() - security_group_data = security_group_create_args or {} - scenario._create_security_group = mock.Mock() - scenario._delete_security_group = mock.Mock() - scenario.run(security_group_create_args=security_group_create_args) - scenario._create_security_group.assert_called_once_with( - **security_group_data) - scenario._delete_security_group.assert_called_once_with( - scenario._create_security_group.return_value) - - @ddt.data( - {}, - {"security_group_create_args": {}}, - {"security_group_create_args": {"description": "fake-description"}}, - {"security_group_update_args": {}}, - {"security_group_update_args": {"description": "fake-updated-descr"}}, - ) - @ddt.unpack - def test_create_and_update_security_groups( - self, security_group_create_args=None, - security_group_update_args=None): - scenario = security_groups.CreateAndUpdateSecurityGroups() - security_group_data = security_group_create_args or {} - security_group_update_data = security_group_update_args or {} - scenario._create_security_group = mock.Mock() - scenario._update_security_group = mock.Mock() - scenario.run(security_group_create_args=security_group_create_args, - security_group_update_args=security_group_update_args) - scenario._create_security_group.assert_called_once_with( - **security_group_data) - scenario._update_security_group.assert_called_once_with( - scenario._create_security_group.return_value, - **security_group_update_data) - - @ddt.data( - {}, - {"security_group_args": {}}, - {"security_group_args": {"description": "fake-description"}}, - {"security_group_rule_args": {}}, - {"security_group_rule_args": {"description": "fake-rule-descr"}}, - ) - @ddt.unpack - def test_create_and_list_security_group_rules( - self, security_group_args=None, - security_group_rule_args=None): - scenario = security_groups.CreateAndListSecurityGroupRules() - - security_group_data = security_group_args or {} - security_group_rule_data = security_group_rule_args or {} - - security_group = mock.MagicMock() - security_group_rule = {"security_group_rule": {"id": 1, "name": "f1"}} - scenario._create_security_group = mock.MagicMock() - scenario._create_security_group_rule = mock.MagicMock() - scenario._list_security_group_rules = mock.MagicMock() - - # Positive case - scenario._create_security_group.return_value = security_group - scenario._create_security_group_rule.return_value = security_group_rule - scenario._list_security_group_rules.return_value = { - "security_group_rules": [{"id": 1, "name": "f1"}, - {"id": 2, "name": "f2"}, - {"id": 3, "name": "f3"}]} - scenario.run(security_group_args=security_group_data, - security_group_rule_args=security_group_rule_data) - - scenario._create_security_group.assert_called_once_with( - **security_group_data) - scenario._create_security_group_rule.assert_called_once_with( - security_group["security_group"]["id"], - **security_group_rule_data) - scenario._list_security_group_rules.assert_called_once_with() - - @ddt.data( - {}, - {"security_group_args": {}}, - {"security_group_args": {"description": "fake-description"}}, - {"security_group_rule_args": {}}, - {"security_group_rule_args": {"description": "fake-rule-descr"}}, - ) - @ddt.unpack - def test_create_and_list_security_group_rules_with_fails( - self, 
security_group_args=None, - security_group_rule_args=None): - scenario = security_groups.CreateAndListSecurityGroupRules() - - security_group_data = security_group_args or {} - security_group_rule_data = security_group_rule_args or {} - - security_group = mock.MagicMock() - security_group_rule = {"security_group_rule": {"id": 1, "name": "f1"}} - scenario._create_security_group = mock.MagicMock() - scenario._create_security_group_rule = mock.MagicMock() - scenario._list_security_group_rules = mock.MagicMock() - scenario._create_security_group_rule.return_value = security_group_rule - scenario._list_security_group_rules.return_value = { - "security_group_rules": [{"id": 1, "name": "f1"}, - {"id": 2, "name": "f2"}, - {"id": 3, "name": "f3"}]} - - # Negative case1: security_group isn't created - scenario._create_security_group.return_value = None - self.assertRaises(rally_exceptions.RallyAssertionError, - scenario.run, - security_group_data, - security_group_rule_data) - scenario._create_security_group.assert_called_with( - **security_group_data) - - # Negative case2: security_group_rule isn't created - scenario._create_security_group.return_value = security_group - scenario._create_security_group_rule.return_value = None - self.assertRaises(rally_exceptions.RallyAssertionError, - scenario.run, - security_group_data, - security_group_rule_data) - scenario._create_security_group.assert_called_with( - **security_group_data) - scenario._create_security_group_rule.assert_called_with( - security_group["security_group"]["id"], - **security_group_rule_data) - - # Negative case3: security_group_rule isn't listed - scenario._create_security_group.return_value = security_group - scenario._create_security_group_rule.return_value = mock.MagicMock() - self.assertRaises(rally_exceptions.RallyAssertionError, - scenario.run, - security_group_data, - security_group_rule_data) - - scenario._create_security_group.assert_called_with( - **security_group_data) - scenario._create_security_group_rule.assert_called_with( - security_group["security_group"]["id"], - **security_group_rule_data) - scenario._list_security_group_rules.assert_called_with() - - @ddt.data( - {}, - {"security_group_args": {}}, - {"security_group_args": {"description": "fake-description"}}, - {"security_group_rule_args": {}}, - {"security_group_rule_args": {"description": "fake-rule-descr"}} - ) - @ddt.unpack - def test_create_and_show_security_group_rule( - self, security_group_args=None, - security_group_rule_args=None): - scenario = security_groups.CreateAndShowSecurityGroupRule() - - security_group_data = security_group_args or {} - security_group_rule_data = security_group_rule_args or {} - security_group = mock.MagicMock() - security_group_rule = {"security_group_rule": {"id": 1, "name": "f1"}} - scenario._create_security_group = mock.MagicMock() - scenario._create_security_group_rule = mock.MagicMock() - scenario._show_security_group_rule = mock.MagicMock() - - # Positive case - scenario._create_security_group.return_value = security_group - scenario._create_security_group_rule.return_value = security_group_rule - scenario.run(security_group_args=security_group_data, - security_group_rule_args=security_group_rule_data) - - scenario._create_security_group.assert_called_once_with( - **security_group_data) - scenario._create_security_group_rule.assert_called_once_with( - security_group["security_group"]["id"], - **security_group_rule_data) - scenario._show_security_group_rule.assert_called_once_with( - 
security_group_rule["security_group_rule"]["id"]) - - @ddt.data( - {}, - {"security_group_args": {}}, - {"security_group_args": {"description": "fake-description"}}, - {"security_group_rule_args": {}}, - {"security_group_rule_args": {"description": "fake-rule-descr"}} - ) - @ddt.unpack - def test_create_and_delete_security_group_rule( - self, security_group_args=None, - security_group_rule_args=None): - scenario = security_groups.CreateAndDeleteSecurityGroupRule() - - security_group_data = security_group_args or {} - security_group_rule_data = security_group_rule_args or {} - security_group = mock.MagicMock() - security_group_rule = {"security_group_rule": {"id": 1, "name": "f1"}} - scenario._create_security_group = mock.MagicMock() - scenario._create_security_group_rule = mock.MagicMock() - scenario._delete_security_group_rule = mock.MagicMock() - scenario._delete_security_group = mock.MagicMock() - - # Positive case - scenario._create_security_group.return_value = security_group - scenario._create_security_group_rule.return_value = security_group_rule - scenario.run(security_group_args=security_group_data, - security_group_rule_args=security_group_rule_data) - - scenario._create_security_group.assert_called_once_with( - **security_group_data) - scenario._create_security_group_rule.assert_called_once_with( - security_group["security_group"]["id"], - **security_group_rule_data) - scenario._delete_security_group_rule.assert_called_once_with( - security_group_rule["security_group_rule"]["id"]) - scenario._delete_security_group.assert_called_once_with( - security_group) - - @ddt.data( - {}, - {"security_group_args": {}}, - {"security_group_args": {"description": "fake-description"}}, - {"security_group_rule_args": {}}, - {"security_group_rule_args": {"description": "fake-rule-descr"}}, - ) - @ddt.unpack - def test_create_and_show_security_group_rule_with_fails( - self, security_group_args=None, - security_group_rule_args=None): - scenario = security_groups.CreateAndShowSecurityGroupRule() - - security_group_data = security_group_args or {} - security_group_rule_data = security_group_rule_args or {} - - security_group = mock.MagicMock() - security_group_rule = {"security_group_rule": {"id": 1, "name": "f1"}} - scenario._create_security_group = mock.MagicMock() - scenario._create_security_group_rule = mock.MagicMock() - scenario._show_security_group_rule = mock.MagicMock() - scenario._create_security_group_rule.return_value = security_group_rule - - # Negative case1: security_group isn't created - scenario._create_security_group.return_value = None - self.assertRaises(rally_exceptions.RallyAssertionError, - scenario.run, - security_group_data, - security_group_rule_data) - scenario._create_security_group.assert_called_with( - **security_group_data) - - # Negative case2: security_group_rule isn't created - scenario._create_security_group.return_value = security_group - scenario._create_security_group_rule.return_value = None - self.assertRaises(rally_exceptions.RallyAssertionError, - scenario.run, - security_group_data, - security_group_rule_data) - scenario._create_security_group.assert_called_with( - **security_group_data) - scenario._create_security_group_rule.assert_called_with( - security_group["security_group"]["id"], - **security_group_rule_data) diff --git a/tests/unit/plugins/openstack/scenarios/neutron/test_utils.py b/tests/unit/plugins/openstack/scenarios/neutron/test_utils.py deleted file mode 100644 index 5881994975..0000000000 --- 
a/tests/unit/plugins/openstack/scenarios/neutron/test_utils.py +++ /dev/null @@ -1,1318 +0,0 @@ -# Copyright 2013: Intel Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import mock - -from rally import exceptions -from rally.plugins.openstack.scenarios.neutron import utils -from tests.unit import test - -NEUTRON_UTILS = "rally.plugins.openstack.scenarios.neutron.utils" - - -@ddt.ddt -class NeutronScenarioTestCase(test.ScenarioTestCase): - - def setUp(self): - super(NeutronScenarioTestCase, self).setUp() - self.network = mock.Mock() - self.scenario = utils.NeutronScenario(self.context) - - self.random_name = "random_name" - self.scenario.generate_random_name = mock.Mock( - return_value=self.random_name) - - def test__get_network_id(self): - networks = [{"id": "foo-id", "name": "foo-network"}, - {"id": "bar-id", "name": "bar-network"}] - network_id = "foo-id" - - # Valid network-name - network = "foo-network" - self.scenario._list_networks = mock.Mock(return_value=networks) - resultant_network_id = self.scenario._get_network_id(network) - self.assertEqual(network_id, resultant_network_id) - self.scenario._list_networks.assert_called_once_with() - - self.scenario._list_networks.reset_mock() - - # Valid network-id - network = "foo-id" - resultant_network_id = self.scenario._get_network_id(network) - self.assertEqual(network_id, resultant_network_id) - self.scenario._list_networks.assert_called_once_with() - self.scenario._list_networks.reset_mock() - - # Invalid network-name - network = "absent-network" - self.assertRaises(exceptions.NotFoundException, - self.scenario._get_network_id, network) - self.scenario._list_networks.assert_called_once_with() - - def test_create_network(self): - self.clients("neutron").create_network.return_value = self.network - - network_data = {"admin_state_up": False} - expected_network_data = {"network": network_data} - network = self.scenario._create_network(network_data) - self.assertEqual(self.network, network) - self.clients("neutron").create_network.assert_called_once_with( - expected_network_data) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.create_network") - - def test_list_networks(self): - networks_list = [] - networks_dict = {"networks": networks_list} - self.clients("neutron").list_networks.return_value = networks_dict - - # without atomic action - return_networks_list = self.scenario._list_networks() - self.assertEqual(networks_list, return_networks_list) - - # with atomic action - return_networks_list = self.scenario._list_networks() - self.assertEqual(networks_list, return_networks_list) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.list_networks", count=2) - - def test_show_network(self): - network = { - "network": { - "id": "fake-id", - "name": "fake-name", - "admin_state_up": False - } - } - - return_network = self.scenario._show_network(network) - self.assertEqual(self.clients("neutron").show_network.return_value, - return_network) - 
self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.show_network") - - def test_show_router(self): - router = { - "router": { - "id": "fake-id", - "name": "fake-name", - "admin_state_up": False - } - } - - return_router = self.scenario._show_router(router) - self.assertEqual(self.clients("neutron").show_router.return_value, - return_router) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.show_router") - - def test_update_network(self): - expected_network = { - "network": { - "name": self.scenario.generate_random_name.return_value, - "admin_state_up": False, - "fakearg": "fake" - } - } - self.clients("neutron").update_network.return_value = expected_network - - network = {"network": {"name": "network-name", "id": "network-id"}} - network_update_args = {"name": "foo", - "admin_state_up": False, - "fakearg": "fake"} - - result_network = self.scenario._update_network(network, - network_update_args) - self.clients("neutron").update_network.assert_called_once_with( - network["network"]["id"], expected_network) - self.assertEqual(expected_network, result_network) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.update_network") - - def test_delete_network(self): - network_create_args = {} - network = self.scenario._create_network(network_create_args) - self.scenario._delete_network(network) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.delete_network") - - @mock.patch("%s.network_wrapper" % NEUTRON_UTILS) - def test_create_subnet(self, mock_network_wrapper): - network_id = "fake-id" - start_cidr = "192.168.0.0/24" - mock_network_wrapper.generate_cidr.return_value = "192.168.0.0/24" - - network = {"network": {"id": network_id}} - expected_subnet_data = { - "subnet": { - "network_id": network_id, - "cidr": start_cidr, - "ip_version": self.scenario.SUBNET_IP_VERSION, - "name": self.scenario.generate_random_name.return_value - } - } - - # Default options - subnet_data = {"network_id": network_id} - self.scenario._create_subnet(network, subnet_data, start_cidr) - self.clients("neutron").create_subnet.assert_called_once_with( - expected_subnet_data) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.create_subnet") - - self.clients("neutron").create_subnet.reset_mock() - - # Custom options - extras = {"cidr": "192.168.16.0/24", "allocation_pools": []} - mock_network_wrapper.generate_cidr.return_value = "192.168.16.0/24" - subnet_data.update(extras) - expected_subnet_data["subnet"].update(extras) - self.scenario._create_subnet(network, subnet_data) - self.clients("neutron").create_subnet.assert_called_once_with( - expected_subnet_data) - - def test_list_subnets(self): - subnets = [{"name": "fake1"}, {"name": "fake2"}] - self.clients("neutron").list_subnets.return_value = { - "subnets": subnets - } - result = self.scenario._list_subnets() - self.assertEqual(subnets, result) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.list_subnets") - - def test_show_subnet(self): - subnet = {"subnet": {"name": "fake-name", "id": "fake-id"}} - - result_subnet = self.scenario._show_subnet(subnet) - self.assertEqual(self.clients("neutron").show_subnet.return_value, - result_subnet) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.show_subnet") - - def test_update_subnet(self): - expected_subnet = { - "subnet": { - "name": self.scenario.generate_random_name.return_value, - "enable_dhcp": False, - "fakearg": "fake" - } - } - 
self.clients("neutron").update_subnet.return_value = expected_subnet - - subnet = {"subnet": {"name": "subnet-name", "id": "subnet-id"}} - subnet_update_args = {"name": "foo", "enable_dhcp": False, - "fakearg": "fake"} - - result_subnet = self.scenario._update_subnet(subnet, - subnet_update_args) - self.clients("neutron").update_subnet.assert_called_once_with( - subnet["subnet"]["id"], expected_subnet) - self.assertEqual(expected_subnet, result_subnet) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.update_subnet") - - def test_delete_subnet(self): - network = self.scenario._create_network({}) - subnet = self.scenario._create_subnet(network, {}) - self.scenario._delete_subnet(subnet) - - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.delete_subnet") - - def test_create_router(self): - router = mock.Mock() - self.clients("neutron").create_router.return_value = router - - # Default options - result_router = self.scenario._create_router({}) - self.clients("neutron").create_router.assert_called_once_with({ - "router": { - "name": self.scenario.generate_random_name.return_value - } - }) - self.assertEqual(result_router, router) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.create_router") - - def test_create_router_with_ext_gw(self): - router = mock.Mock() - external_network = [{"id": "ext-net", "router:external": True}] - self.scenario._list_networks = mock.Mock(return_value=external_network) - self.clients("neutron").create_router.return_value = router - self.clients("neutron").list_extensions.return_value = { - "extensions": [{"alias": "ext-gw-mode"}]} - - # External_gw options - gw_info = {"network_id": external_network[0]["id"], - "enable_snat": True} - router_data = { - "name": self.scenario.generate_random_name.return_value, - "external_gateway_info": gw_info - } - result_router = self.scenario._create_router({}, external_gw=True) - self.clients("neutron").create_router.assert_called_once_with( - {"router": router_data}) - self.assertEqual(result_router, router) - self._test_atomic_action_timer( - self.scenario.atomic_actions(), "neutron.create_router") - - def test_create_router_with_ext_gw_but_no_ext_net(self): - router = mock.Mock() - external_network = [{"id": "ext-net", "router:external": False}] - self.scenario._list_networks = mock.Mock(return_value=external_network) - self.clients("neutron").create_router.return_value = router - self.clients("neutron").list_extensions.return_value = { - "extensions": [{"alias": "ext-gw-mode"}]} - - # External_gw options with no external networks in list_networks() - result_router = self.scenario._create_router({}, external_gw=True) - self.clients("neutron").create_router.assert_called_once_with({ - "router": {"name": self.scenario.generate_random_name.return_value} - }) - self.assertEqual(result_router, router) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.create_router") - - def test_create_router_with_ext_gw_but_no_ext_gw_mode_extension(self): - router = mock.Mock() - external_network = [{"id": "ext-net", "router:external": True}] - self.scenario._list_networks = mock.Mock(return_value=external_network) - self.clients("neutron").create_router.return_value = router - self.clients("neutron").list_extensions.return_value = { - "extensions": []} - - # External_gw options - gw_info = {"network_id": external_network[0]["id"]} - router_data = { - "name": self.scenario.generate_random_name.return_value, - "external_gateway_info": gw_info - 
} - result_router = self.scenario._create_router({}, external_gw=True) - self.clients("neutron").create_router.assert_called_once_with( - {"router": router_data}) - self.assertEqual(result_router, router) - self._test_atomic_action_timer( - self.scenario.atomic_actions(), "neutron.create_router") - - def test_create_router_explicit(self): - router = mock.Mock() - self.clients("neutron").create_router.return_value = router - - # Custom options - router_data = {"name": "explicit_name", "admin_state_up": True} - result_router = self.scenario._create_router(router_data) - self.clients("neutron").create_router.assert_called_once_with( - {"router": router_data}) - self.assertEqual(result_router, router) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.create_router") - - def test_list_routers(self): - routers = [mock.Mock()] - self.clients("neutron").list_routers.return_value = { - "routers": routers} - self.assertEqual(routers, self.scenario._list_routers()) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.list_routers") - - def test_list_agents(self): - agents = [mock.Mock()] - self.clients("neutron").list_agents.return_value = { - "agents": agents} - self.assertEqual(agents, self.scenario._list_agents()) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.list_agents") - - def test_update_router(self): - expected_router = { - "router": { - "name": self.scenario.generate_random_name.return_value, - "admin_state_up": False, - "fakearg": "fake" - } - } - self.clients("neutron").update_router.return_value = expected_router - - router = { - "router": { - "id": "router-id", - "name": "router-name", - "admin_state_up": True - } - } - router_update_args = {"name": "foo", - "admin_state_up": False, - "fakearg": "fake"} - - result_router = self.scenario._update_router(router, - router_update_args) - self.clients("neutron").update_router.assert_called_once_with( - router["router"]["id"], expected_router) - self.assertEqual(expected_router, result_router) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.update_router") - - def test_delete_router(self): - router = self.scenario._create_router({}) - self.scenario._delete_router(router) - self.clients("neutron").delete_router.assert_called_once_with( - router["router"]["id"]) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.delete_router") - - def test_remove_interface_router(self): - subnet = {"name": "subnet-name", "id": "subnet-id"} - router_data = {"id": 1} - router = self.scenario._create_router(router_data) - self.scenario._add_interface_router(subnet, router) - self.scenario._remove_interface_router(subnet, router) - mock_remove_router = self.clients("neutron").remove_interface_router - mock_remove_router.assert_called_once_with( - router["id"], {"subnet_id": subnet["id"]}) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.remove_interface_router") - - def test_add_gateway_router(self): - ext_net = { - "network": { - "name": "extnet-name", - "id": "extnet-id" - } - } - router = { - "router": { - "name": "router-name", - "id": "router-id" - } - } - enable_snat = "fake_snat" - gw_info = {"network_id": ext_net["network"]["id"], - "enable_snat": enable_snat} - self.clients("neutron").list_extensions.return_value = { - "extensions": [{"alias": "ext-gw-mode"}]} - - self.scenario._add_gateway_router(router, ext_net, enable_snat) - self.clients("neutron").add_gateway_router.assert_called_once_with( - 
router["router"]["id"], gw_info) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.add_gateway_router") - - def test_add_gateway_router_without_ext_gw_mode_extension(self): - ext_net = { - "network": { - "name": "extnet-name", - "id": "extnet-id" - } - } - router = { - "router": { - "name": "router-name", - "id": "router-id" - } - } - enable_snat = "fake_snat" - gw_info = {"network_id": ext_net["network"]["id"]} - self.clients("neutron").list_extensions.return_value = { - "extensions": {}} - - self.scenario._add_gateway_router(router, ext_net, enable_snat) - self.clients("neutron").add_gateway_router.assert_called_once_with( - router["router"]["id"], gw_info) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.add_gateway_router") - - def test_remove_gateway_router(self): - router = { - "router": { - "name": "router-name", - "id": "router-id" - } - } - self.scenario._remove_gateway_router(router) - self.clients("neutron").remove_gateway_router.assert_called_once_with( - router["router"]["id"]) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.remove_gateway_router") - - def test_SUBNET_IP_VERSION(self): - """Curent NeutronScenario implementation supports only IPv4.""" - self.assertEqual(4, utils.NeutronScenario.SUBNET_IP_VERSION) - - def test_create_port(self): - net_id = "network-id" - net = {"network": {"id": net_id}} - expected_port_args = { - "port": { - "network_id": net_id, - "name": self.scenario.generate_random_name.return_value - } - } - - # Defaults - port_create_args = {} - self.scenario._create_port(net, port_create_args) - self.clients("neutron" - ).create_port.assert_called_once_with(expected_port_args) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.create_port") - - self.clients("neutron").create_port.reset_mock() - - # Custom options - port_args = {"admin_state_up": True} - expected_port_args["port"].update(port_args) - self.scenario._create_port(net, port_args) - self.clients("neutron" - ).create_port.assert_called_once_with(expected_port_args) - - def test_list_ports(self): - ports = [{"name": "port1"}, {"name": "port2"}] - self.clients("neutron").list_ports.return_value = {"ports": ports} - self.assertEqual(ports, self.scenario._list_ports()) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.list_ports") - - def test_show_port(self): - expect_port = { - "port": { - "id": "port-id", - "name": "port-name", - "admin_state_up": True - } - } - self.clients("neutron").show_port.return_value = expect_port - self.assertEqual(expect_port, self.scenario._show_port(expect_port)) - self.clients("neutron").show_port.assert_called_once_with( - expect_port["port"]["id"]) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.show_port") - - def test_update_port(self): - expected_port = { - "port": { - "admin_state_up": False, - "fakearg": "fake", - "name": self.scenario.generate_random_name.return_value - } - } - self.clients("neutron").update_port.return_value = expected_port - - port = { - "port": { - "id": "port-id", - "name": "port-name", - "admin_state_up": True - } - } - port_update_args = { - "admin_state_up": False, - "fakearg": "fake" - } - - result_port = self.scenario._update_port(port, port_update_args) - self.clients("neutron").update_port.assert_called_once_with( - port["port"]["id"], expected_port) - self.assertEqual(expected_port, result_port) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - 
"neutron.update_port") - - def test_delete_port(self): - network = self.scenario._create_network({}) - port = self.scenario._create_port(network, {}) - self.scenario._delete_port(port) - - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.create_port") - - @ddt.data( - {"context": {"tenant": {"networks": - [mock.MagicMock(), mock.MagicMock()]}}}, - {"network_create_args": {"fakearg": "fake"}, - "context": {"tenant": {"networks": - [mock.MagicMock(), mock.MagicMock()]}}}) - @ddt.unpack - @mock.patch("random.choice", side_effect=lambda l: l[0]) - def test_get_or_create_network(self, mock_random_choice, - network_create_args=None, context=None): - self.scenario.context = context - self.scenario._create_network = mock.Mock( - return_value={"network": mock.Mock()}) - - network = self.scenario._get_or_create_network(network_create_args) - - # ensure that the return value is the proper type either way - self.assertIn("network", network) - - if "networks" in context["tenant"]: - self.assertEqual(network, - {"network": context["tenant"]["networks"][0]}) - self.assertFalse(self.scenario._create_network.called) - else: - self.assertEqual(network, - self.scenario._create_network.return_value) - self.scenario._create_network.assert_called_once_with( - network_create_args or {}) - - @mock.patch("%s.NeutronScenario._create_subnet" % NEUTRON_UTILS) - @mock.patch("%s.NeutronScenario._create_network" % NEUTRON_UTILS) - def test_create_network_and_subnets(self, - mock__create_network, - mock__create_subnet): - mock__create_network.return_value = {"network": {"id": "fake-id"}} - mock__create_subnet.return_value = { - "subnet": { - "name": "subnet-name", - "id": "subnet-id", - "enable_dhcp": False - } - } - - network_create_args = {} - subnet_create_args = {} - subnets_per_network = 4 - - # Default options - self.scenario._create_network_and_subnets( - network_create_args=network_create_args, - subnet_create_args=subnet_create_args, - subnets_per_network=subnets_per_network) - - mock__create_network.assert_called_once_with({}) - mock__create_subnet.assert_has_calls( - [mock.call({"network": {"id": "fake-id"}}, - {}, "1.0.0.0/24")] * subnets_per_network) - - mock__create_network.reset_mock() - mock__create_subnet.reset_mock() - - # Custom options - self.scenario._create_network_and_subnets( - network_create_args=network_create_args, - subnet_create_args={"allocation_pools": []}, - subnet_cidr_start="10.10.10.0/24", - subnets_per_network=subnets_per_network) - - mock__create_network.assert_called_once_with({}) - mock__create_subnet.assert_has_calls( - [mock.call({"network": {"id": "fake-id"}}, - {"allocation_pools": []}, - "10.10.10.0/24")] * subnets_per_network) - - def test_list_floating_ips(self): - fips_list = [{"id": "floating-ip-id"}] - fips_dict = {"floatingips": fips_list} - self.clients("neutron").list_floatingips.return_value = fips_dict - self.assertEqual(self.scenario._list_floating_ips(), - self.clients("neutron").list_floatingips.return_value) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.list_floating_ips") - - def test_delete_floating_ip(self): - fip = {"floatingip": {"id": "fake-id"}} - self.scenario._delete_floating_ip(fip["floatingip"]) - self.clients("neutron").delete_floatingip.assert_called_once_with( - fip["floatingip"]["id"]) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.delete_floating_ip") - - @ddt.data( - {}, - {"router_create_args": {"admin_state_up": False}}, - {"network_create_args": 
{"router:external": True}, - "subnet_create_args": {"allocation_pools": []}, - "subnet_cidr_start": "default_cidr", - "subnets_per_network": 3, - "router_create_args": {"admin_state_up": False}}) - @ddt.unpack - def test_create_network_structure(self, network_create_args=None, - subnet_create_args=None, - subnet_cidr_start=None, - subnets_per_network=None, - router_create_args=None): - network = mock.MagicMock() - - router_create_args = router_create_args or {} - - subnets = [] - routers = [] - router_create_calls = [] - for i in range(subnets_per_network or 1): - subnets.append(mock.MagicMock()) - routers.append(mock.MagicMock()) - router_create_calls.append(mock.call(router_create_args)) - - self.scenario._create_network = mock.Mock(return_value=network) - self.scenario._create_subnets = mock.Mock(return_value=subnets) - self.scenario._create_router = mock.Mock(side_effect=routers) - self.scenario._add_interface_router = mock.Mock() - - actual = self.scenario._create_network_structure(network_create_args, - subnet_create_args, - subnet_cidr_start, - subnets_per_network, - router_create_args) - self.assertEqual((network, subnets, routers), actual) - self.scenario._create_network.assert_called_once_with( - network_create_args or {}) - self.scenario._create_subnets.assert_called_once_with( - network, - subnet_create_args, - subnet_cidr_start, - subnets_per_network) - self.scenario._create_router.assert_has_calls(router_create_calls) - - add_iface_calls = [mock.call(subnets[i]["subnet"], - routers[i]["router"]) - for i in range(subnets_per_network or 1)] - self.scenario._add_interface_router.assert_has_calls(add_iface_calls) - - def test_delete_v1_pool(self): - pool = {"pool": {"id": "fake-id"}} - self.scenario._delete_v1_pool(pool["pool"]) - self.clients("neutron").delete_pool.assert_called_once_with( - pool["pool"]["id"]) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.delete_pool") - - def test_update_pool(self): - expected_pool = { - "pool": { - "name": self.scenario.generate_random_name.return_value, - "admin_state_up": False, - "fakearg": "fake" - } - } - self.clients("neutron").update_pool.return_value = expected_pool - - pool = {"pool": {"name": "pool-name", "id": "pool-id"}} - pool_update_args = {"name": "foo", - "admin_state_up": False, - "fakearg": "fake"} - - result_pool = self.scenario._update_v1_pool(pool, **pool_update_args) - self.assertEqual(expected_pool, result_pool) - self.clients("neutron").update_pool.assert_called_once_with( - pool["pool"]["id"], expected_pool) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.update_pool") - - def test_list_v1_pools(self): - pools_list = [] - pools_dict = {"pools": pools_list} - self.clients("neutron").list_pools.return_value = pools_dict - return_pools_dict = self.scenario._list_v1_pools() - self.assertEqual(pools_dict, return_pools_dict) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.list_pools") - - def test_list_v1_vips(self): - vips_list = [] - vips_dict = {"vips": vips_list} - self.clients("neutron").list_vips.return_value = vips_dict - return_vips_dict = self.scenario._list_v1_vips() - self.assertEqual(vips_dict, return_vips_dict) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.list_vips") - - def test_delete_v1_vip(self): - vip = {"vip": {"id": "fake-id"}} - self.scenario._delete_v1_vip(vip["vip"]) - self.clients("neutron").delete_vip.assert_called_once_with( - vip["vip"]["id"]) - 
self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.delete_vip") - - def test_update_v1_vip(self): - expected_vip = { - "vip": { - "name": self.scenario.generate_random_name.return_value, - "admin_state_up": False - } - } - self.clients("neutron").update_vip.return_value = expected_vip - - vip = {"vip": {"name": "vip-name", "id": "vip-id"}} - vip_update_args = {"name": "foo", "admin_state_up": False} - - result_vip = self.scenario._update_v1_vip(vip, **vip_update_args) - self.assertEqual(expected_vip, result_vip) - self.clients("neutron").update_vip.assert_called_once_with( - vip["vip"]["id"], expected_vip) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.update_vip") - - @mock.patch("%s.NeutronScenario.generate_random_name" % NEUTRON_UTILS) - def test_create_security_group(self, mock_generate_random_name): - security_group_create_args = {"description": "Fake security group"} - expected_security_group = { - "security_group": { - "id": "fake-id", - "name": self.scenario.generate_random_name.return_value, - "description": "Fake security group" - } - } - self.clients("neutron").create_security_group = mock.Mock( - return_value=expected_security_group) - - security_group_data = { - "security_group": - {"name": "random_name", - "description": "Fake security group"} - } - resultant_security_group = self.scenario._create_security_group( - **security_group_create_args) - self.assertEqual(expected_security_group, resultant_security_group) - self.clients("neutron").create_security_group.assert_called_once_with( - security_group_data) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.create_security_group") - - def test_list_security_groups(self): - security_groups_list = [{"id": "security-group-id"}] - security_groups_dict = {"security_groups": security_groups_list} - self.clients("neutron").list_security_groups = mock.Mock( - return_value=security_groups_dict) - self.assertEqual( - self.scenario._list_security_groups(), - self.clients("neutron").list_security_groups.return_value) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.list_security_groups") - - def test_show_security_group(self): - security_group = {"security_group": {"id": "fake-id"}} - result = self.scenario._show_security_group(security_group) - self.assertEqual( - result, - self.clients("neutron").show_security_group.return_value) - self.clients("neutron").show_security_group.assert_called_once_with( - security_group["security_group"]["id"]) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.show_security_group") - - def test_delete_security_group(self): - security_group = {"security_group": {"id": "fake-id"}} - self.scenario._delete_security_group(security_group) - self.clients("neutron").delete_security_group.assert_called_once_with( - security_group["security_group"]["id"]) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.delete_security_group") - - def test_update_security_group(self): - security_group = { - "security_group": { - "id": "security-group-id", - "description": "Not updated" - } - } - expected_security_group = { - "security_group": { - "id": "security-group-id", - "name": self.scenario.generate_random_name.return_value, - "description": "Updated" - } - } - - self.clients("neutron").update_security_group = mock.Mock( - return_value=expected_security_group) - result_security_group = self.scenario._update_security_group( - security_group, description="Updated") - 
self.clients("neutron").update_security_group.assert_called_once_with( - security_group["security_group"]["id"], - {"security_group": { - "description": "Updated", - "name": self.scenario.generate_random_name.return_value}} - ) - self.assertEqual(expected_security_group, result_security_group) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.update_security_group") - - def test_create_security_group_rule(self): - security_group_rule_args = {"description": "Fake Rule"} - expected_security_group_rule = { - "security_group_rule": { - "id": "fake-id", - "security_group_id": "security-group-id", - "direction": "ingress", - "description": "Fake Rule" - } - } - client = self.clients("neutron") - client.create_security_group_rule = mock.Mock( - return_value=expected_security_group_rule) - - security_group_rule_data = { - "security_group_rule": - {"security_group_id": "security-group-id", - "direction": "ingress", - "description": "Fake Rule"} - } - result_security_group_rule = self.scenario._create_security_group_rule( - "security-group-id", **security_group_rule_args) - self.assertEqual(expected_security_group_rule, - result_security_group_rule) - client.create_security_group_rule.assert_called_once_with( - security_group_rule_data) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.create_security_group_rule") - - def test_list_security_group_rules(self): - security_group_rules_list = [{"id": "security-group-rule-id"}] - security_group_rules_dict = { - "security_group_rules": security_group_rules_list} - - self.clients("neutron").list_security_group_rules = mock.Mock( - return_value=security_group_rules_dict) - self.assertEqual( - self.scenario._list_security_group_rules(), - self.clients("neutron").list_security_group_rules.return_value) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.list_security_group_rules") - - def test_show_security_group_rule(self): - return_rule = self.scenario._show_security_group_rule(1) - self.assertEqual( - self.clients("neutron").show_security_group_rule.return_value, - return_rule) - - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.show_security_group_rule") - - def test_delete_security_group_rule(self): - self.scenario._delete_security_group_rule(1) - clients = self.clients("neutron") - clients.delete_security_group_rule.assert_called_once_with(1) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.delete_security_group_rule") - - @ddt.data( - {"networks": [{"subnets": "subnet-id"}]}, - {"pool_create_args": None, "networks": [{"subnets": ["subnet-id"]}]}, - {"pool_create_args": {}, "networks": [{"subnets": ["subnet-id"]}]}, - {"pool_create_args": {"name": "given-name"}, - "networks": [{"subnets": ["subnet-id"]}]}, - ) - @ddt.unpack - def test__create_v1_pools(self, networks, pool_create_args=None): - pool_create_args = pool_create_args or {} - pool = {"pool": {"id": "pool-id"}} - self.scenario._create_lb_pool = mock.Mock(return_value=pool) - resultant_pools = self.scenario._create_v1_pools( - networks=networks, **pool_create_args) - if networks: - subnets = [] - [subnets.extend(net["subnets"]) for net in networks] - self.scenario._create_lb_pool.assert_has_calls( - [mock.call(subnet, - **pool_create_args) for subnet in subnets]) - self.assertEqual([pool] * len(subnets), resultant_pools) - - @ddt.data( - {"subnet_id": "foo-id"}, - {"pool_create_args": None, "subnet_id": "foo-id"}, - {"pool_create_args": {}, "subnet_id": "foo-id"}, - 
{"pool_create_args": {"name": "given-name"}, - "subnet_id": "foo-id"}, - {"subnet_id": "foo-id"} - ) - @ddt.unpack - def test__create_lb_pool(self, subnet_id=None, - pool_create_args=None): - pool = {"pool": {"id": "pool-id"}} - pool_create_args = pool_create_args or {} - if pool_create_args.get("name") is None: - self.generate_random_name = mock.Mock(return_value="random_name") - self.clients("neutron").create_pool.return_value = pool - args = {"lb_method": "ROUND_ROBIN", "protocol": "HTTP", - "name": "random_name", "subnet_id": subnet_id} - args.update(pool_create_args) - expected_pool_data = {"pool": args} - resultant_pool = self.scenario._create_lb_pool( - subnet_id=subnet_id, - **pool_create_args) - self.assertEqual(pool, resultant_pool) - self.clients("neutron").create_pool.assert_called_once_with( - expected_pool_data) - self._test_atomic_action_timer( - self.scenario.atomic_actions(), "neutron.create_pool") - - @ddt.data( - {}, - {"vip_create_args": {}}, - {"vip_create_args": {"name": "given-name"}}, - ) - @ddt.unpack - def test__create_v1_vip(self, vip_create_args=None): - vip = {"vip": {"id": "vip-id"}} - pool = {"pool": {"id": "pool-id", "subnet_id": "subnet-id"}} - vip_create_args = vip_create_args or {} - if vip_create_args.get("name") is None: - self.scenario.generate_random_name = mock.Mock( - return_value="random_name") - self.clients("neutron").create_vip.return_value = vip - args = {"protocol_port": 80, "protocol": "HTTP", "name": "random_name", - "subnet_id": pool["pool"]["subnet_id"], - "pool_id": pool["pool"]["id"]} - args.update(vip_create_args) - expected_vip_data = {"vip": args} - resultant_vip = self.scenario._create_v1_vip(pool, **vip_create_args) - self.assertEqual(vip, resultant_vip) - self.clients("neutron").create_vip.assert_called_once_with( - expected_vip_data) - - @ddt.data( - {}, - {"floating_ip_args": {}}, - {"floating_ip_args": {"floating_ip_address": "1.0.0.1"}}, - ) - @ddt.unpack - def test__create_floating_ip(self, floating_ip_args=None): - floating_network = "floating" - fip = {"floatingip": {"id": "fip-id"}} - network_id = "net-id" - floating_ip_args = floating_ip_args or {} - self.clients("neutron").create_floatingip.return_value = fip - mock_get_network_id = self.scenario._get_network_id = mock.Mock() - mock_get_network_id.return_value = network_id - args = {"floating_network_id": network_id, - "description": "random_name"} - args.update(floating_ip_args) - expected_fip_data = {"floatingip": args} - resultant_fip = self.scenario._create_floatingip( - floating_network, **floating_ip_args) - self.assertEqual(fip, resultant_fip) - self.clients("neutron").create_floatingip.assert_called_once_with( - expected_fip_data) - mock_get_network_id.assert_called_once_with(floating_network) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.create_floating_ip") - - @mock.patch("%s.LOG.info" % NEUTRON_UTILS) - def test__create_floating_ip_in_pre_newton_openstack(self, mock_log_info): - floating_network = "floating" - fip = {"floatingip": {"id": "fip-id"}} - network_id = "net-id" - self.clients("neutron").create_floatingip.return_value = fip - mock_get_network_id = self.scenario._get_network_id = mock.Mock() - mock_get_network_id.return_value = network_id - - from neutronclient.common import exceptions as n_exceptions - e = n_exceptions.BadRequest("Unrecognized attribute(s) 'description'") - self.clients("neutron").create_floatingip.side_effect = e - - a_e = self.assertRaises(n_exceptions.BadRequest, - self.scenario._create_floatingip, - 
floating_network) - self.assertEqual(e, a_e) - self.assertTrue(mock_log_info.called) - - expected_fip_data = {"floatingip": {"floating_network_id": network_id, - "description": "random_name"}} - self.clients("neutron").create_floatingip.assert_called_once_with( - expected_fip_data) - mock_get_network_id.assert_called_once_with(floating_network) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.create_floating_ip") - - @ddt.data( - {}, - {"healthmonitor_create_args": {}}, - {"healthmonitor_create_args": {"type": "TCP"}}, - ) - @ddt.unpack - def test__create_v1_healthmonitor(self, - healthmonitor_create_args=None): - hm = {"health_monitor": {"id": "hm-id"}} - healthmonitor_create_args = healthmonitor_create_args or {} - self.clients("neutron").create_health_monitor.return_value = hm - args = {"type": "PING", "delay": 20, - "timeout": 10, "max_retries": 3} - args.update(healthmonitor_create_args) - expected_hm_data = {"health_monitor": args} - resultant_hm = self.scenario._create_v1_healthmonitor( - **healthmonitor_create_args) - self.assertEqual(hm, resultant_hm) - self.clients("neutron").create_health_monitor.assert_called_once_with( - expected_hm_data) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.create_healthmonitor") - - def test_list_v1_healthmonitors(self): - hm_list = [] - hm_dict = {"health_monitors": hm_list} - self.clients("neutron").list_health_monitors.return_value = hm_dict - return_hm_dict = self.scenario._list_v1_healthmonitors() - self.assertEqual(hm_dict, return_hm_dict) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.list_healthmonitors") - - def test_delete_v1_healthmonitor(self): - healthmonitor = {"health_monitor": {"id": "fake-id"}} - self.scenario._delete_v1_healthmonitor(healthmonitor["health_monitor"]) - self.clients("neutron").delete_health_monitor.assert_called_once_with( - healthmonitor["health_monitor"]["id"]) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.delete_healthmonitor") - - def test_update_healthmonitor(self): - expected_hm = {"health_monitor": {"admin_state_up": False}} - mock_update = self.clients("neutron").update_health_monitor - mock_update.return_value = expected_hm - hm = {"health_monitor": {"id": "pool-id"}} - healthmonitor_update_args = {"admin_state_up": False} - result_hm = self.scenario._update_v1_healthmonitor( - hm, **healthmonitor_update_args) - self.assertEqual(expected_hm, result_hm) - mock_update.assert_called_once_with( - hm["health_monitor"]["id"], expected_hm) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.update_healthmonitor") - - def test_update_loadbalancer_resource(self): - lb = {"id": "1", "provisioning_status": "READY"} - new_lb = {"id": "1", "provisioning_status": "ACTIVE"} - self.clients("neutron").show_loadbalancer.return_value = { - "loadbalancer": new_lb} - - return_lb = self.scenario.update_loadbalancer_resource(lb) - - self.clients("neutron").show_loadbalancer.assert_called_once_with( - lb["id"]) - self.assertEqual(new_lb, return_lb) - - def test_update_loadbalancer_resource_not_found(self): - from neutronclient.common import exceptions as n_exceptions - lb = {"id": "1", "provisioning_status": "READY"} - self.clients("neutron").show_loadbalancer.side_effect = ( - n_exceptions.NotFound) - - self.assertRaises(exceptions.GetResourceNotFound, - self.scenario.update_loadbalancer_resource, - lb) - self.clients("neutron").show_loadbalancer.assert_called_once_with( - lb["id"]) - - def 
test_update_loadbalancer_resource_failure(self): - from neutronclient.common import exceptions as n_exceptions - lb = {"id": "1", "provisioning_status": "READY"} - self.clients("neutron").show_loadbalancer.side_effect = ( - n_exceptions.Forbidden) - - self.assertRaises(exceptions.GetResourceFailure, - self.scenario.update_loadbalancer_resource, - lb) - self.clients("neutron").show_loadbalancer.assert_called_once_with( - lb["id"]) - - def test__create_lbaasv2_loadbalancer(self): - neutronclient = self.clients("neutron") - create_args = {"name": "s_rally", "vip_subnet_id": "1", - "fake": "fake"} - new_lb = {"id": "1", "provisioning_status": "ACTIVE"} - - self.scenario.generate_random_name = mock.Mock( - return_value="s_rally") - self.mock_wait_for_status.mock.return_value = new_lb - - return_lb = self.scenario._create_lbaasv2_loadbalancer( - "1", fake="fake") - - neutronclient.create_loadbalancer.assert_called_once_with( - {"loadbalancer": create_args}) - self.assertEqual(new_lb, return_lb) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.create_lbaasv2_loadbalancer") - - def test__list_lbaasv2_loadbalancers(self): - value = {"loadbalancer": [{"id": "1", "name": "s_rally"}]} - self.clients("neutron").list_loadbalancers.return_value = value - - return_value = self.scenario._list_lbaasv2_loadbalancers( - True, fake="fake") - - (self.clients("neutron").list_loadbalancers - .assert_called_once_with(True, fake="fake")) - self.assertEqual(value, return_value) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.list_lbaasv2_loadbalancers") - - def test__create_bgpvpn(self, atomic_action=True): - bv = {"bgpvpn": {"id": "bgpvpn-id"}} - self.admin_clients("neutron").create_bgpvpn.return_value = bv - self.scenario.generate_random_name = mock.Mock( - return_value="random_name") - expected_bv_data = {"bgpvpn": {"name": "random_name"}} - resultant_bv = self.scenario._create_bgpvpn() - self.assertEqual(bv, resultant_bv) - self.admin_clients("neutron").create_bgpvpn.assert_called_once_with( - expected_bv_data) - if atomic_action: - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.create_bgpvpn") - - def test_delete_bgpvpn(self): - bgpvpn_create_args = {} - bgpvpn = self.scenario._create_bgpvpn(**bgpvpn_create_args) - self.scenario._delete_bgpvpn(bgpvpn) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.delete_bgpvpn") - - def test__list_bgpvpns(self): - bgpvpns_list = [] - bgpvpns_dict = {"bgpvpns": bgpvpns_list} - self.admin_clients("neutron").list_bgpvpns.return_value = bgpvpns_dict - return_bgpvpns_list = self.scenario._list_bgpvpns() - self.assertEqual(bgpvpns_list, return_bgpvpns_list) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.list_bgpvpns") - - @ddt.data( - {}, - {"bgpvpn_update_args": {"update_name": True}}, - {"bgpvpn_update_args": {"update_name": False}}, - ) - @ddt.unpack - def test__update_bgpvpn(self, bgpvpn_update_args=None): - expected_bgpvpn = {"bgpvpn": {}} - bgpvpn_update_data = bgpvpn_update_args or {} - if bgpvpn_update_data.get("update_name"): - expected_bgpvpn = {"bgpvpn": {"name": "updated_name"}} - self.admin_clients( - "neutron").update_bgpvpn.return_value = expected_bgpvpn - self.scenario.generate_random_name = mock.Mock( - return_value="updated_name") - bgpvpn = {"bgpvpn": {"name": "bgpvpn-name", "id": "bgpvpn-id"}} - result_bgpvpn = self.scenario._update_bgpvpn(bgpvpn, - **bgpvpn_update_data) - 
self.admin_clients("neutron").update_bgpvpn.assert_called_once_with( - bgpvpn["bgpvpn"]["id"], expected_bgpvpn) - self.assertEqual(expected_bgpvpn, result_bgpvpn) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.update_bgpvpn") - - def test__create_bgpvpn_network_assoc(self): - network_id = "network_id" - bgpvpn_id = "bgpvpn_id" - value = {"network_association": { - "network_id": network_id, - "id": bgpvpn_id}} - self.clients( - "neutron").create_bgpvpn_network_assoc.return_value = value - network = {"id": network_id} - bgpvpn = {"bgpvpn": {"id": bgpvpn_id}} - return_value = self.scenario._create_bgpvpn_network_assoc(bgpvpn, - network) - netassoc = {"network_id": network["id"]} - self.clients( - "neutron").create_bgpvpn_network_assoc.assert_called_once_with( - bgpvpn_id, {"network_association": netassoc}) - self.assertEqual(return_value, value) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.create_bgpvpn_network_assoc") - - def test__create_router_network_assoc(self): - router_id = "router_id" - bgpvpn_id = "bgpvpn_id" - value = {"router_association": { - "router_id": router_id, - "id": "asso_id"}} - self.clients("neutron").create_bgpvpn_router_assoc.return_value = value - router = {"id": router_id} - bgpvpn = {"bgpvpn": {"id": bgpvpn_id}} - return_value = self.scenario._create_bgpvpn_router_assoc(bgpvpn, - router) - router_assoc = {"router_id": router["id"]} - self.clients( - "neutron").create_bgpvpn_router_assoc.assert_called_once_with( - bgpvpn_id, {"router_association": router_assoc}) - self.assertEqual(return_value, value) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.create_bgpvpn_router_assoc") - - def test__delete_bgpvpn_network_assoc(self): - bgpvpn_assoc_args = {} - asso_id = "aaaa-bbbb" - network_assoc = {"network_association": {"id": asso_id}} - bgpvpn = self.scenario._create_bgpvpn(**bgpvpn_assoc_args) - self.scenario._delete_bgpvpn_network_assoc(bgpvpn, network_assoc) - self.clients( - "neutron").delete_bgpvpn_network_assoc.assert_called_once_with( - bgpvpn["bgpvpn"]["id"], asso_id) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.delete_bgpvpn_network_assoc") - - def test__delete_bgpvpn_router_assoc(self): - bgpvpn_assoc_args = {} - asso_id = "aaaa-bbbb" - router_assoc = {"router_association": {"id": asso_id}} - bgpvpn = self.scenario._create_bgpvpn(**bgpvpn_assoc_args) - self.scenario._delete_bgpvpn_router_assoc(bgpvpn, router_assoc) - self.clients( - "neutron").delete_bgpvpn_router_assoc.assert_called_once_with( - bgpvpn["bgpvpn"]["id"], asso_id) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.delete_bgpvpn_router_assoc") - - def test__list_bgpvpn_network_assocs(self): - value = {"network_associations": []} - bgpvpn_id = "bgpvpn-id" - bgpvpn = {"bgpvpn": {"id": bgpvpn_id}} - self.clients("neutron").list_bgpvpn_network_assocs.return_value = value - return_asso_list = self.scenario._list_bgpvpn_network_assocs(bgpvpn) - self.clients( - "neutron").list_bgpvpn_network_assocs.assert_called_once_with( - bgpvpn_id) - self.assertEqual(value, return_asso_list) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.list_bgpvpn_network_assocs") - - def test__list_bgpvpn_router_assocs(self): - value = {"router_associations": []} - bgpvpn_id = "bgpvpn-id" - bgpvpn = {"bgpvpn": {"id": bgpvpn_id}} - self.clients("neutron").list_bgpvpn_router_assocs.return_value = value - return_asso_list = 
self.scenario._list_bgpvpn_router_assocs(bgpvpn) - self.clients( - "neutron").list_bgpvpn_router_assocs.assert_called_once_with( - bgpvpn_id) - self.assertEqual(value, return_asso_list) - self._test_atomic_action_timer(self.scenario.atomic_actions(), - "neutron.list_bgpvpn_router_assocs") - - -class NeutronScenarioFunctionalTestCase(test.FakeClientsScenarioTestCase): - - @mock.patch("%s.network_wrapper.generate_cidr" % NEUTRON_UTILS) - def test_functional_create_network_and_subnets(self, mock_generate_cidr): - scenario = utils.NeutronScenario(context=self.context) - network_create_args = {} - subnet_create_args = {} - subnets_per_network = 5 - subnet_cidr_start = "1.1.1.0/24" - - cidrs = ["1.1.%d.0/24" % i for i in range(subnets_per_network)] - cidrs_ = iter(cidrs) - mock_generate_cidr.side_effect = lambda **kw: next(cidrs_) - - network, subnets = scenario._create_network_and_subnets( - network_create_args, - subnet_create_args, - subnets_per_network, - subnet_cidr_start) - - # This checks both data (cidrs seem to be enough) and subnets number - result_cidrs = sorted([s["subnet"]["cidr"] for s in subnets]) - self.assertEqual(cidrs, result_cidrs) diff --git a/tests/unit/plugins/openstack/scenarios/nova/__init__.py b/tests/unit/plugins/openstack/scenarios/nova/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/scenarios/nova/test_agents.py b/tests/unit/plugins/openstack/scenarios/nova/test_agents.py deleted file mode 100644 index b4d373622f..0000000000 --- a/tests/unit/plugins/openstack/scenarios/nova/test_agents.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2016 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.plugins.openstack.scenarios.nova import agents -from tests.unit import test - - -class NovaAgentsTestCase(test.TestCase): - - def test_list_agents(self): - scenario = agents.ListAgents() - scenario._list_agents = mock.Mock() - scenario.run(hypervisor=None) - scenario._list_agents.assert_called_once_with(None) diff --git a/tests/unit/plugins/openstack/scenarios/nova/test_aggregates.py b/tests/unit/plugins/openstack/scenarios/nova/test_aggregates.py deleted file mode 100644 index 632575bc9f..0000000000 --- a/tests/unit/plugins/openstack/scenarios/nova/test_aggregates.py +++ /dev/null @@ -1,159 +0,0 @@ -# Copyright 2016 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from rally import exceptions -from rally.plugins.openstack.scenarios.nova import aggregates -from tests.unit import test - - -class NovaAggregatesTestCase(test.ScenarioTestCase): - - def test_list_aggregates(self): - scenario = aggregates.ListAggregates() - scenario._list_aggregates = mock.Mock() - scenario.run() - scenario._list_aggregates.assert_called_once_with() - - def test_create_and_list_aggregates(self): - # Positive case - scenario = aggregates.CreateAndListAggregates() - scenario._create_aggregate = mock.Mock(return_value="agg1") - scenario._list_aggregates = mock.Mock(return_value=("agg1", "agg2")) - scenario.run(availability_zone="nova") - scenario._create_aggregate.assert_called_once_with("nova") - scenario._list_aggregates.assert_called_once_with() - - # Negative case 1: aggregate isn't created - scenario._create_aggregate.return_value = None - self.assertRaises(exceptions.RallyAssertionError, - scenario.run, availability_zone="nova") - scenario._create_aggregate.assert_called_with("nova") - - # Negative case 2: aggregate was created but not included into list - scenario._create_aggregate.return_value = "agg3" - self.assertRaises(exceptions.RallyAssertionError, - scenario.run, availability_zone="nova") - scenario._create_aggregate.assert_called_with("nova") - scenario._list_aggregates.assert_called_with() - - def test_create_and_delete_aggregate(self): - scenario = aggregates.CreateAndDeleteAggregate() - scenario._create_aggregate = mock.Mock() - scenario._delete_aggregate = mock.Mock() - scenario.run(availability_zone="nova") - scenario._create_aggregate.assert_called_once_with("nova") - aggregate = scenario._create_aggregate.return_value - scenario._delete_aggregate.assert_called_once_with(aggregate) - - def test_create_and_update_aggregate(self): - scenario = aggregates.CreateAndUpdateAggregate() - scenario._create_aggregate = mock.Mock() - scenario._update_aggregate = mock.Mock() - scenario.run(availability_zone="nova") - scenario._create_aggregate.assert_called_once_with("nova") - aggregate = scenario._create_aggregate.return_value - scenario._update_aggregate.assert_called_once_with(aggregate) - - def test_create_aggregate_add_and_remove_host(self): - fake_aggregate = "fake_aggregate" - fake_hosts = [mock.Mock(service={"host": "fake_host_name"})] - scenario = aggregates.CreateAggregateAddAndRemoveHost() - scenario._create_aggregate = mock.MagicMock( - return_value=fake_aggregate) - scenario._list_hypervisors = mock.MagicMock(return_value=fake_hosts) - scenario._aggregate_add_host = mock.MagicMock() - scenario._aggregate_remove_host = mock.MagicMock() - scenario.run(availability_zone="nova") - scenario._create_aggregate.assert_called_once_with( - "nova") - scenario._list_hypervisors.assert_called_once_with() - scenario._aggregate_add_host.assert_called_once_with( - "fake_aggregate", "fake_host_name") - scenario._aggregate_remove_host.assert_called_once_with( - "fake_aggregate", "fake_host_name") - - def test_create_and_get_aggregate_details(self): - scenario = aggregates.CreateAndGetAggregateDetails() - scenario._create_aggregate = mock.Mock() - scenario._get_aggregate_details = mock.Mock() - scenario.run(availability_zone="nova") - scenario._create_aggregate.assert_called_once_with("nova") - aggregate = scenario._create_aggregate.return_value - scenario._get_aggregate_details.assert_called_once_with(aggregate) - - def test_create_aggregate_add_host_and_boot_server(self): - fake_aggregate = mock.Mock() - fake_hosts = [mock.Mock(service={"host": 
"fake_host_name"})] - fake_flavor = mock.MagicMock(id="flavor-id-0", ram=512, disk=1, - vcpus=1) - fake_metadata = {"test_metadata": "true"} - fake_server = mock.MagicMock(id="server-id-0") - setattr(fake_server, "OS-EXT-SRV-ATTR:hypervisor_hostname", - "fake_host_name") - fake_aggregate_kwargs = {"fake_arg1": "f"} - - scenario = aggregates.CreateAggregateAddHostAndBootServer() - scenario._create_aggregate = mock.MagicMock( - return_value=fake_aggregate) - scenario._list_hypervisors = mock.MagicMock(return_value=fake_hosts) - scenario._aggregate_add_host = mock.MagicMock() - scenario._aggregate_set_metadata = mock.MagicMock() - scenario._create_flavor = mock.MagicMock(return_value=fake_flavor) - scenario._boot_server = mock.MagicMock(return_value=fake_server) - self.admin_clients("nova").servers.get.return_value = fake_server - - scenario.run("img", fake_metadata, availability_zone="nova", - boot_server_kwargs=fake_aggregate_kwargs) - scenario._create_aggregate.assert_called_once_with("nova") - scenario._list_hypervisors.assert_called_once_with() - scenario._aggregate_set_metadata.assert_called_once_with( - fake_aggregate, fake_metadata) - scenario._aggregate_add_host(fake_aggregate, "fake_host_name") - scenario._create_flavor.assert_called_once_with(512, 1, 1) - fake_flavor.set_keys.assert_called_once_with(fake_metadata) - scenario._boot_server.assert_called_once_with("img", "flavor-id-0", - **fake_aggregate_kwargs) - self.admin_clients("nova").servers.get.assert_called_once_with( - "server-id-0") - - self.assertEqual(getattr( - fake_server, "OS-EXT-SRV-ATTR:hypervisor_hostname"), - "fake_host_name") - - def test_create_aggregate_add_host_and_boot_server_failure(self): - fake_aggregate = mock.Mock() - fake_hosts = [mock.Mock(service={"host": "fake_host_name"})] - fake_flavor = mock.MagicMock(id="flavor-id-0", ram=512, disk=1, - vcpus=1) - fake_metadata = {"test_metadata": "true"} - fake_server = mock.MagicMock(id="server-id-0") - setattr(fake_server, "OS-EXT-SRV-ATTR:hypervisor_hostname", - "wrong_host_name") - fake_boot_server_kwargs = {"fake_arg1": "f"} - - scenario = aggregates.CreateAggregateAddHostAndBootServer() - scenario._create_aggregate = mock.MagicMock( - return_value=fake_aggregate) - scenario._list_hypervisors = mock.MagicMock(return_value=fake_hosts) - scenario._aggregate_add_host = mock.MagicMock() - scenario._aggregate_set_metadata = mock.MagicMock() - scenario._create_flavor = mock.MagicMock(return_value=fake_flavor) - scenario._boot_server = mock.MagicMock(return_value=fake_server) - self.admin_clients("nova").servers.get.return_value = fake_server - - self.assertRaises(exceptions.RallyException, scenario.run, "img", - fake_metadata, "nova", fake_boot_server_kwargs) diff --git a/tests/unit/plugins/openstack/scenarios/nova/test_availability_zones.py b/tests/unit/plugins/openstack/scenarios/nova/test_availability_zones.py deleted file mode 100644 index 8cc66b3978..0000000000 --- a/tests/unit/plugins/openstack/scenarios/nova/test_availability_zones.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2016 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.plugins.openstack.scenarios.nova import availability_zones -from tests.unit import test - - -class NovaAvailabilityZonesTestCase(test.TestCase): - - def test_list_availability_zones(self): - scenario = availability_zones.ListAvailabilityZones() - scenario._list_availability_zones = mock.Mock() - scenario.run(detailed=False) - scenario._list_availability_zones.assert_called_once_with(False) diff --git a/tests/unit/plugins/openstack/scenarios/nova/test_flavors.py b/tests/unit/plugins/openstack/scenarios/nova/test_flavors.py deleted file mode 100644 index abd619f0ea..0000000000 --- a/tests/unit/plugins/openstack/scenarios/nova/test_flavors.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright: 2015. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import mock - -from rally import exceptions -from rally.plugins.openstack.scenarios.nova import flavors -from tests.unit import test - - -@ddt.ddt -class NovaFlavorsTestCase(test.TestCase): - - def test_list_flavors(self): - scenario = flavors.ListFlavors() - scenario._list_flavors = mock.Mock() - scenario.run(detailed=True, is_public=True, limit=None, marker=None, - min_disk=None, min_ram=None, sort_dir=None, sort_key=None) - scenario._list_flavors.assert_called_once_with( - detailed=True, is_public=True, limit=None, marker=None, - min_disk=None, min_ram=None, sort_dir=None, sort_key=None) - - def test_create_and_list_flavor_access(self): - # Common parameters - ram = 100 - vcpus = 1 - disk = 1 - - scenario = flavors.CreateAndListFlavorAccess() - scenario._create_flavor = mock.Mock() - scenario._list_flavor_access = mock.Mock() - - # Positive case: - scenario.run( - ram, vcpus, disk, ephemeral=0, flavorid="auto", - is_public=False, rxtx_factor=1.0, swap=0) - scenario._create_flavor.assert_called_once_with( - ram, vcpus, disk, ephemeral=0, flavorid="auto", - is_public=False, rxtx_factor=1.0, swap=0) - scenario._list_flavor_access.assert_called_once_with( - scenario._create_flavor.return_value.id) - - # Negative case1: flavor wasn't created - scenario._create_flavor.return_value = None - self.assertRaises(exceptions.RallyAssertionError, scenario.run, - ram, vcpus, disk, ephemeral=0, flavorid="auto", - is_public=False, rxtx_factor=1.0, swap=0) - scenario._create_flavor.assert_called_with( - ram, vcpus, disk, ephemeral=0, flavorid="auto", - is_public=False, rxtx_factor=1.0, swap=0) - - def test_create_flavor_add_tenant_access(self): - flavor = mock.MagicMock() - context = {"user": {"tenant_id": "fake"}, - "tenant": {"id": "fake"}} - scenario = flavors.CreateFlavorAndAddTenantAccess() - scenario.context = context - scenario.generate_random_name = mock.MagicMock() - scenario._create_flavor = mock.MagicMock(return_value=flavor) - scenario._add_tenant_access = mock.MagicMock() - - # Positive case: - scenario.run(ram=100, vcpus=1, disk=1, ephemeral=0, - flavorid="auto", is_public=True, 
rxtx_factor=1.0, swap=0) - - scenario._create_flavor.assert_called_once_with( - 100, 1, 1, ephemeral=0, flavorid="auto", is_public=True, - rxtx_factor=1.0, swap=0) - scenario._add_tenant_access.assert_called_once_with(flavor.id, - "fake") - - # Negative case1: flavor wasn't created - scenario._create_flavor.return_value = None - self.assertRaises(exceptions.RallyAssertionError, scenario.run, - 100, 1, 1, ephemeral=0, flavorid="auto", - is_public=True, rxtx_factor=1.0, swap=0) - scenario._create_flavor.assert_called_with( - 100, 1, 1, ephemeral=0, flavorid="auto", is_public=True, - rxtx_factor=1.0, swap=0) - - def test_create_flavor(self): - scenario = flavors.CreateFlavor() - scenario._create_flavor = mock.MagicMock() - scenario.run(ram=100, vcpus=1, disk=1, ephemeral=0, flavorid="auto", - is_public=True, rxtx_factor=1.0, swap=0) - scenario._create_flavor.assert_called_once_with( - 100, 1, 1, ephemeral=0, - flavorid="auto", is_public=True, rxtx_factor=1.0, swap=0) - - def test_create_and_get_flavor(self, **kwargs): - scenario = flavors.CreateAndGetFlavor() - scenario._create_flavor = mock.Mock() - scenario._get_flavor = mock.Mock() - scenario.run(ram=100, vcpus=1, disk=1, ephemeral=0, flavorid="auto", - is_public=True, rxtx_factor=1.0, swap=0) - - scenario._create_flavor.assert_called_once_with( - 100, 1, 1, ephemeral=0, flavorid="auto", is_public=True, - rxtx_factor=1.0, swap=0) - scenario._get_flavor.assert_called_once_with( - scenario._create_flavor.return_value.id) - - def test_create_and_delete_flavor(self): - scenario = flavors.CreateAndDeleteFlavor() - scenario._create_flavor = mock.Mock() - scenario._delete_flavor = mock.Mock() - scenario.run(ram=100, vcpus=1, disk=1, ephemeral=0, flavorid="auto", - is_public=True, rxtx_factor=1.0, swap=0) - - scenario._create_flavor.assert_called_once_with( - 100, 1, 1, ephemeral=0, flavorid="auto", is_public=True, - rxtx_factor=1.0, swap=0) - scenario._delete_flavor.assert_called_once_with( - scenario._create_flavor.return_value.id) - - def test_create_flavor_and_set_keys(self): - scenario = flavors.CreateFlavorAndSetKeys() - scenario._create_flavor = mock.MagicMock() - scenario._set_flavor_keys = mock.MagicMock() - specs_args = {"fakeargs": "foo"} - scenario.run( - ram=100, vcpus=1, disk=1, extra_specs=specs_args, - ephemeral=0, flavorid="auto", is_public=True, - rxtx_factor=1.0, swap=0) - - scenario._create_flavor.assert_called_once_with( - 100, 1, 1, ephemeral=0, flavorid="auto", - is_public=True, rxtx_factor=1.0, swap=0) - scenario._set_flavor_keys.assert_called_once_with( - scenario._create_flavor.return_value, specs_args) diff --git a/tests/unit/plugins/openstack/scenarios/nova/test_hypervisors.py b/tests/unit/plugins/openstack/scenarios/nova/test_hypervisors.py deleted file mode 100644 index 9e77ebd449..0000000000 --- a/tests/unit/plugins/openstack/scenarios/nova/test_hypervisors.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright 2013 Cisco Systems Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from rally.plugins.openstack.scenarios.nova import hypervisors -from tests.unit import test - - -class NovaHypervisorsTestCase(test.ScenarioTestCase): - def test_list_hypervisors(self): - scenario = hypervisors.ListHypervisors(self.context) - scenario._list_hypervisors = mock.Mock() - scenario.run(detailed=False) - scenario._list_hypervisors.assert_called_once_with(False) - - def test_list_and_get_hypervisors(self): - scenario = hypervisors.ListAndGetHypervisors(self.context) - scenario._list_hypervisors = mock.MagicMock(detailed=False) - scenario._get_hypervisor = mock.MagicMock() - scenario.run(detailed=False) - - scenario._list_hypervisors.assert_called_once_with(False) - for hypervisor in scenario._list_hypervisors.return_value: - scenario._get_hypervisor.assert_called_once_with(hypervisor) - - def test_statistics_hypervisors(self): - scenario = hypervisors.StatisticsHypervisors(self.context) - scenario._statistics_hypervisors = mock.Mock() - scenario.run() - scenario._statistics_hypervisors.assert_called_once_with() - - def test_list_and_get_uptime_hypervisors(self): - scenario = hypervisors.ListAndGetUptimeHypervisors(self.context) - scenario._list_hypervisors = mock.MagicMock(detailed=False) - scenario._uptime_hypervisor = mock.MagicMock() - scenario.run(detailed=False) - - scenario._list_hypervisors.assert_called_once_with(False) - for hypervisor in scenario._list_hypervisors.return_value: - scenario._uptime_hypervisor.assert_called_once_with(hypervisor) - - def test_list_and_search_hypervisors(self): - fake_hypervisors = [mock.Mock(hypervisor_hostname="fake_hostname")] - scenario = hypervisors.ListAndSearchHypervisors(self.context) - scenario._list_hypervisors = mock.MagicMock( - return_value=fake_hypervisors) - scenario._search_hypervisors = mock.MagicMock() - scenario.run(detailed=False) - - scenario._list_hypervisors.assert_called_once_with(False) - scenario._search_hypervisors.assert_called_once_with( - "fake_hostname") diff --git a/tests/unit/plugins/openstack/scenarios/nova/test_images.py b/tests/unit/plugins/openstack/scenarios/nova/test_images.py deleted file mode 100644 index 26136dd2ce..0000000000 --- a/tests/unit/plugins/openstack/scenarios/nova/test_images.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright: 2015 Workday, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from rally.plugins.openstack.scenarios.nova import images -from tests.unit import test - - -class NovaImagesTestCase(test.TestCase): - - def test_list_images(self): - scenario = images.ListImages() - scenario._list_images = mock.Mock() - scenario.run(detailed=False, fakearg="fakearg") - scenario._list_images.assert_called_once_with(False, fakearg="fakearg") diff --git a/tests/unit/plugins/openstack/scenarios/nova/test_keypairs.py b/tests/unit/plugins/openstack/scenarios/nova/test_keypairs.py deleted file mode 100644 index 4b20c6ade6..0000000000 --- a/tests/unit/plugins/openstack/scenarios/nova/test_keypairs.py +++ /dev/null @@ -1,104 +0,0 @@ -# Copyright 2015: Hewlett-Packard Development Company, L.P. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally import exceptions -from rally.plugins.openstack.scenarios.nova import keypairs -from tests.unit import fakes -from tests.unit import test - - -class NovaKeypairTestCase(test.ScenarioTestCase): - - def test_create_and_list_keypairs(self): - - fake_nova_client = fakes.FakeNovaClient() - fake_nova_client.keypairs.create("keypair") - fake_keypair = list(fake_nova_client.keypairs.cache.values())[0] - - scenario = keypairs.CreateAndListKeypairs(self.context) - scenario._create_keypair = mock.MagicMock() - scenario._list_keypairs = mock.MagicMock() - - scenario._list_keypairs.return_value = [fake_keypair] * 3 - # Positive case: - scenario._create_keypair.return_value = fake_keypair.id - scenario.run(fakearg="fakearg") - - scenario._create_keypair.assert_called_once_with(fakearg="fakearg") - scenario._list_keypairs.assert_called_once_with() - - # Negative case1: keypair isn't created - scenario._create_keypair.return_value = None - self.assertRaises(exceptions.RallyAssertionError, - scenario.run, fakearg="fakearg") - scenario._create_keypair.assert_called_with(fakearg="fakearg") - - # Negative case2: new keypair not in the list of keypairs - scenario._create_keypair.return_value = "fake_keypair" - self.assertRaises(exceptions.RallyAssertionError, - scenario.run, fakearg="fakearg") - scenario._create_keypair.assert_called_with(fakearg="fakearg") - scenario._list_keypairs.assert_called_with() - - def test_create_and_get_keypair(self): - scenario = keypairs.CreateAndGetKeypair(self.context) - fake_keypair = mock.MagicMock() - scenario._create_keypair = mock.MagicMock() - scenario._get_keypair = mock.MagicMock() - - scenario._create_keypair.return_value = fake_keypair - scenario.run(fakearg="fakearg") - - scenario._create_keypair.assert_called_once_with(fakearg="fakearg") - scenario._get_keypair.assert_called_once_with(fake_keypair) - - def test_create_and_delete_keypair(self): - scenario = keypairs.CreateAndDeleteKeypair(self.context) - scenario.generate_random_name = mock.MagicMock(return_value="name") - scenario._create_keypair = mock.MagicMock(return_value="foo_keypair") - scenario._delete_keypair = mock.MagicMock() - - scenario.run(fakearg="fakearg") - - 
scenario._create_keypair.assert_called_once_with(fakearg="fakearg") - scenario._delete_keypair.assert_called_once_with("foo_keypair") - - def test_boot_and_delete_server_with_keypair(self): - scenario = keypairs.BootAndDeleteServerWithKeypair(self.context) - scenario.generate_random_name = mock.MagicMock(return_value="name") - scenario._create_keypair = mock.MagicMock(return_value="foo_keypair") - scenario._boot_server = mock.MagicMock(return_value="foo_server") - scenario._delete_server = mock.MagicMock() - scenario._delete_keypair = mock.MagicMock() - - fake_server_args = { - "foo": 1, - "bar": 2, - } - - scenario.run("img", 1, boot_server_kwargs=fake_server_args, - fake_arg1="foo", fake_arg2="bar") - - scenario._create_keypair.assert_called_once_with( - fake_arg1="foo", fake_arg2="bar") - - scenario._boot_server.assert_called_once_with( - "img", 1, foo=1, bar=2, key_name="foo_keypair") - - scenario._delete_server.assert_called_once_with("foo_server") - - scenario._delete_keypair.assert_called_once_with("foo_keypair") diff --git a/tests/unit/plugins/openstack/scenarios/nova/test_server_groups.py b/tests/unit/plugins/openstack/scenarios/nova/test_server_groups.py deleted file mode 100644 index d4dc779896..0000000000 --- a/tests/unit/plugins/openstack/scenarios/nova/test_server_groups.py +++ /dev/null @@ -1,164 +0,0 @@ -# Copyright 2017: Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ddt -import mock - -from rally import exceptions as rally_exceptions -from rally.plugins.openstack.scenarios.nova import server_groups -from tests.unit import test - -SERVER_GROUPS_MODULE = "rally.plugins.openstack.scenarios.nova.server_groups" -NOVA_SERVER_GROUPS = SERVER_GROUPS_MODULE + ".NovaServerGroups" - - -@ddt.ddt -class NovaServerGroupsTestCase(test.ScenarioTestCase): - - def test_create_and_list_server_groups(self): - scenario = server_groups.CreateAndListServerGroups(self.context) - fake_server_group = mock.MagicMock() - all_projects = False - scenario._create_server_group = mock.MagicMock() - scenario._list_server_groups = mock.MagicMock() - scenario._list_server_groups.return_value = [mock.MagicMock(), - fake_server_group, - mock.MagicMock()] - # Positive case and kwargs is None - scenario._create_server_group.return_value = fake_server_group - scenario.run(policies="fake_policy", all_projects=False, kwargs=None) - kwargs = { - "policies": "fake_policy" - } - scenario._create_server_group.assert_called_once_with(**kwargs) - scenario._list_server_groups.assert_called_once_with(all_projects) - - # Positive case and kwargs is not None - foo_kwargs = { - "policies": "fake_policy" - } - scenario._create_server_group.return_value = fake_server_group - scenario.run(policies=None, all_projects=False, - kwargs=foo_kwargs) - scenario._create_server_group.assert_called_with(**foo_kwargs) - scenario._list_server_groups.assert_called_with(all_projects) - - # Negative case1: server group isn't created - scenario._create_server_group.return_value = None - self.assertRaises(rally_exceptions.RallyAssertionError, - scenario.run, - **kwargs) - scenario._create_server_group.assert_called_with(**kwargs) - - # Negative case2: server group not in the list of available server - # groups - scenario._create_server_group.return_value = mock.MagicMock() - self.assertRaises(rally_exceptions.RallyAssertionError, - scenario.run, - **kwargs) - scenario._create_server_group.assert_called_with(**kwargs) - scenario._list_server_groups.assert_called_with(all_projects) - - def test_create_and_get_server_group_positive(self): - scenario = server_groups.CreateAndGetServerGroup(self.context) - fake_server_group = mock.MagicMock() - fake_server_group_info = mock.MagicMock() - fake_server_group.id = 123 - fake_server_group_info.id = 123 - scenario._create_server_group = mock.MagicMock() - scenario._get_server_group = mock.MagicMock() - # Positive case and kwargs is None - kwargs = { - "policies": "fake_policy" - } - scenario._create_server_group.return_value = fake_server_group - scenario._get_server_group.return_value = fake_server_group_info - scenario.run(policies="fake_policy", kwargs=None) - scenario._create_server_group.assert_called_once_with(**kwargs) - scenario._get_server_group.assert_called_once_with( - fake_server_group.id) - - # Positive case and kwargs is not None - scenario._create_server_group.return_value = fake_server_group - scenario._get_server_group.return_value = fake_server_group_info - foo_kwargs = { - "policies": "fake_policy" - } - scenario.run(policies=None, kwargs=foo_kwargs) - scenario._create_server_group.assert_called_with(**foo_kwargs) - scenario._get_server_group.assert_called_with( - fake_server_group.id) - - def test_create_and_get_server_group_negative(self): - scenario = server_groups.CreateAndGetServerGroup(self.context) - fake_server_group = mock.MagicMock() - fake_server_group_info = mock.MagicMock() - fake_server_group.id = 123 - fake_server_group_info.id = 123 - 
kwargs = { - "policies": "fake_policy" - } - scenario._create_server_group = mock.MagicMock() - scenario._get_server_group = mock.MagicMock() - - # Negative case1: server group isn't created - scenario._create_server_group.return_value = None - self.assertRaises(rally_exceptions.RallyAssertionError, - scenario.run, - **kwargs) - scenario._create_server_group.assert_called_with(**kwargs) - - # Negative case2: server group to get information not the created one - fake_server_group_info.id = 456 - scenario._create_server_group.return_value = fake_server_group - self.assertRaises(rally_exceptions.RallyAssertionError, - scenario.run, - **kwargs) - scenario._create_server_group.assert_called_with(**kwargs) - scenario._get_server_group.assert_called_with( - fake_server_group.id) - - def test_create_and_delete_server_group(self): - scenario = server_groups.CreateAndDeleteServerGroup(self.context) - fake_server_group = mock.MagicMock() - scenario._create_server_group = mock.MagicMock() - scenario._delete_server_group = mock.MagicMock() - - # Positive case and kwargs is None - kwargs = { - "policies": "fake_policy" - } - scenario._create_server_group.return_value = fake_server_group - scenario.run(policies="fake_policy", kwargs=None) - scenario._create_server_group.assert_called_once_with(**kwargs) - scenario._delete_server_group.assert_called_once_with( - fake_server_group.id) - - # Positive case and kwargs is not None - scenario._create_server_group.return_value = fake_server_group - foo_kwargs = { - "policies": "fake_policy" - } - scenario.run(policies=None, kwargs=foo_kwargs) - scenario._create_server_group.assert_called_with(**foo_kwargs) - scenario._delete_server_group.assert_called_with( - fake_server_group.id) - - # Negative case: server group isn't created - scenario._create_server_group.return_value = None - self.assertRaises(rally_exceptions.RallyAssertionError, - scenario.run, - **kwargs) - scenario._create_server_group.assert_called_with(**kwargs) diff --git a/tests/unit/plugins/openstack/scenarios/nova/test_servers.py b/tests/unit/plugins/openstack/scenarios/nova/test_servers.py deleted file mode 100644 index 5e628b5fe4..0000000000 --- a/tests/unit/plugins/openstack/scenarios/nova/test_servers.py +++ /dev/null @@ -1,1028 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ddt -import mock - -from rally import exceptions as rally_exceptions -from rally.plugins.openstack.scenarios.nova import servers -from tests.unit import fakes -from tests.unit import test - - -NOVA_SERVERS_MODULE = "rally.plugins.openstack.scenarios.nova.servers" -NOVA_SERVERS = NOVA_SERVERS_MODULE + ".NovaServers" - - -@ddt.ddt -class NovaServersTestCase(test.ScenarioTestCase): - - @ddt.data(("rescue_unrescue", ["_rescue_server", "_unrescue_server"], 1), - ("stop_start", ["_stop_server", "_start_server"], 2), - ("pause_unpause", ["_pause_server", "_unpause_server"], 3), - ("suspend_resume", ["_suspend_server", "_resume_server"], 4), - ("lock_unlock", ["_lock_server", "_unlock_server"], 5), - ("shelve_unshelve", ["_shelve_server", "_unshelve_server"], 6)) - @ddt.unpack - def test_action_pair(self, action_pair, methods, nof_calls): - actions = [{action_pair: nof_calls}] - fake_server = mock.MagicMock() - scenario = servers.BootAndBounceServer(self.context) - scenario._boot_server = mock.MagicMock(return_value=fake_server) - scenario._delete_server = mock.MagicMock() - scenario.generate_random_name = mock.MagicMock(return_value="name") - for method in methods: - setattr(scenario, method, mock.MagicMock()) - - scenario.run("img", 1, actions=actions) - - scenario._boot_server.assert_called_once_with("img", 1) - server_calls = [] - for i in range(nof_calls): - server_calls.append(mock.call(fake_server)) - for method in methods: - mocked_method = getattr(scenario, method) - self.assertEqual(nof_calls, mocked_method.call_count, - "%s not called %d times" % (method, nof_calls)) - mocked_method.assert_has_calls(server_calls) - scenario._delete_server.assert_called_once_with(fake_server, - force=False) - - def test_multiple_bounce_actions(self): - actions = [{"hard_reboot": 5}, {"stop_start": 8}, - {"rescue_unrescue": 3}, {"pause_unpause": 2}, - {"suspend_resume": 4}, {"lock_unlock": 6}, - {"shelve_unshelve": 7}] - fake_server = mock.MagicMock() - scenario = servers.BootAndBounceServer(self.context) - - scenario._boot_server = mock.MagicMock(return_value=fake_server) - scenario._delete_server = mock.MagicMock() - scenario._reboot_server = mock.MagicMock() - scenario._stop_and_start_server = mock.MagicMock() - scenario._rescue_and_unrescue_server = mock.MagicMock() - scenario._pause_and_unpause_server = mock.MagicMock() - scenario._suspend_and_resume_server = mock.MagicMock() - scenario._lock_and_unlock_server = mock.MagicMock() - scenario._shelve_and_unshelve_server = mock.MagicMock() - scenario.generate_random_name = mock.MagicMock(return_value="name") - - scenario.run("img", 1, actions=actions) - scenario._boot_server.assert_called_once_with("img", 1) - server_calls = [] - for i in range(5): - server_calls.append(mock.call(fake_server)) - self.assertEqual(5, scenario._reboot_server.call_count, - "Reboot not called 5 times") - scenario._reboot_server.assert_has_calls(server_calls) - server_calls = [] - for i in range(8): - server_calls.append(mock.call(fake_server)) - self.assertEqual(8, scenario._stop_and_start_server.call_count, - "Stop/Start not called 8 times") - scenario._stop_and_start_server.assert_has_calls(server_calls) - server_calls = [] - for i in range(3): - server_calls.append(mock.call(fake_server)) - self.assertEqual(3, scenario._rescue_and_unrescue_server.call_count, - "Rescue/Unrescue not called 3 times") - scenario._rescue_and_unrescue_server.assert_has_calls(server_calls) - server_calls = [] - for i in range(2): - server_calls.append(mock.call(fake_server)) - 
self.assertEqual(2, scenario._pause_and_unpause_server.call_count, - "Pause/Unpause not called 2 times") - scenario._pause_and_unpause_server.assert_has_calls(server_calls) - server_calls = [] - for i in range(4): - server_calls.append(mock.call(fake_server)) - self.assertEqual(4, scenario._suspend_and_resume_server.call_count, - "Suspend/Resume not called 4 times") - scenario._suspend_and_resume_server.assert_has_calls(server_calls) - server_calls = [] - for i in range(6): - server_calls.append(mock.call(fake_server)) - self.assertEqual(6, scenario._lock_and_unlock_server.call_count, - "Lock/Unlock not called 6 times") - scenario._lock_and_unlock_server.assert_has_calls(server_calls) - server_calls = [] - for i in range(7): - server_calls.append(mock.call(fake_server)) - self.assertEqual(7, scenario._shelve_and_unshelve_server.call_count, - "Shelve/Unshelve not called 7 times") - scenario._shelve_and_unshelve_server.assert_has_calls(server_calls) - scenario._delete_server.assert_called_once_with(fake_server, - force=False) - - def test_boot_lock_unlock_and_delete(self): - server = fakes.FakeServer() - image = fakes.FakeImage() - flavor = fakes.FakeFlavor() - - scenario = servers.BootLockUnlockAndDelete(self.context) - scenario._boot_server = mock.Mock(return_value=server) - scenario._lock_server = mock.Mock(side_effect=lambda s: s.lock()) - scenario._unlock_server = mock.Mock(side_effect=lambda s: s.unlock()) - scenario._delete_server = mock.Mock( - side_effect=lambda s, **kwargs: - self.assertFalse(getattr(s, "OS-EXT-STS:locked", False))) - - scenario.run(image, flavor, fakearg="fakearg") - - scenario._boot_server.assert_called_once_with(image, flavor, - fakearg="fakearg") - scenario._lock_server.assert_called_once_with(server) - scenario._unlock_server.assert_called_once_with(server) - scenario._delete_server.assert_called_once_with(server, force=False) - - @ddt.data("hard_reboot", "soft_reboot", "stop_start", - "rescue_unrescue", "pause_unpause", "suspend_resume", - "lock_unlock", "shelve_unshelve") - def test_validate_actions(self, action): - scenario = servers.BootAndBounceServer(self.context) - - self.assertRaises(rally_exceptions.InvalidConfigException, - scenario.run, - 1, 1, actions=[{action: "no"}]) - self.assertRaises(rally_exceptions.InvalidConfigException, - scenario.run, - 1, 1, actions=[{action: -1}]) - self.assertRaises(rally_exceptions.InvalidConfigException, - scenario.run, - 1, 1, actions=[{action: 0}]) - - def test_validate_actions_additional(self): - scenario = servers.BootAndBounceServer(self.context) - - self.assertRaises(rally_exceptions.InvalidConfigException, - scenario.run, - 1, 1, actions=[{"not_existing_action": "no"}]) - # NOTE: next should fail because actions parameter is a just a - # dictionary, not an array of dictionaries - self.assertRaises(rally_exceptions.InvalidConfigException, - scenario.run, - 1, 1, actions={"hard_reboot": 1}) - - def _verify_reboot(self, soft=True): - actions = [{"soft_reboot" if soft else "hard_reboot": 5}] - fake_server = mock.MagicMock() - scenario = servers.BootAndBounceServer(self.context) - - scenario._reboot_server = mock.MagicMock() - scenario._soft_reboot_server = mock.MagicMock() - scenario._boot_server = mock.MagicMock(return_value=fake_server) - scenario._delete_server = mock.MagicMock() - scenario.generate_random_name = mock.MagicMock(return_value="name") - - scenario.run("img", 1, actions=actions) - - scenario._boot_server.assert_called_once_with("img", 1) - server_calls = [] - for i in range(5): - 
server_calls.append(mock.call(fake_server)) - if soft: - self.assertEqual(5, scenario._soft_reboot_server.call_count, - "Reboot not called 5 times") - scenario._soft_reboot_server.assert_has_calls(server_calls) - else: - self.assertEqual(5, scenario._reboot_server.call_count, - "Reboot not called 5 times") - scenario._reboot_server.assert_has_calls(server_calls) - scenario._delete_server.assert_called_once_with(fake_server, - force=False) - - def test_boot_soft_reboot(self): - self._verify_reboot(soft=True) - - def test_boot_hard_reboot(self): - self._verify_reboot(soft=False) - - def test_boot_and_delete_server(self): - fake_server = object() - - scenario = servers.BootAndDeleteServer(self.context) - scenario.generate_random_name = mock.MagicMock(return_value="name") - scenario._boot_server = mock.MagicMock(return_value=fake_server) - scenario._delete_server = mock.MagicMock() - scenario.sleep_between = mock.MagicMock() - - scenario.run("img", 0, 10, 20, fakearg="fakearg") - - scenario._boot_server.assert_called_once_with("img", 0, - fakearg="fakearg") - scenario.sleep_between.assert_called_once_with(10, 20) - scenario._delete_server.assert_called_once_with(fake_server, - force=False) - - def test_boot_and_delete_multiple_servers(self): - scenario = servers.BootAndDeleteMultipleServers(self.context) - scenario._boot_servers = mock.Mock() - scenario._delete_servers = mock.Mock() - scenario.sleep_between = mock.Mock() - - scenario.run("img", "flavor", count=15, min_sleep=10, - max_sleep=20, fakearg="fakearg") - - scenario._boot_servers.assert_called_once_with("img", "flavor", 1, - instances_amount=15, - fakearg="fakearg") - scenario.sleep_between.assert_called_once_with(10, 20) - scenario._delete_servers.assert_called_once_with( - scenario._boot_servers.return_value, force=False) - - def test_boot_and_list_server(self): - scenario = servers.BootAndListServer(self.context) -# scenario.generate_random_name = mock.MagicMock(return_value="name") - - img_name = "img" - flavor_uuid = 0 - details = True - fake_server_name = mock.MagicMock() - scenario._boot_server = mock.MagicMock() - scenario._list_servers = mock.MagicMock() - scenario._list_servers.return_value = [mock.MagicMock(), - fake_server_name, - mock.MagicMock()] - - # Positive case - scenario._boot_server.return_value = fake_server_name - scenario.run(img_name, flavor_uuid, fakearg="fakearg") - - scenario._boot_server.assert_called_once_with(img_name, flavor_uuid, - fakearg="fakearg") - scenario._list_servers.assert_called_once_with(details) - - # Negative case1: server isn't created - scenario._boot_server.return_value = None - self.assertRaises(rally_exceptions.RallyAssertionError, - scenario.run, - img_name, flavor_uuid, fakearg="fakearg") - scenario._boot_server.assert_called_with(img_name, flavor_uuid, - fakearg="fakearg") - - # Negative case2: server not in the list of available servers - scenario._boot_server.return_value = mock.MagicMock() - self.assertRaises(rally_exceptions.RallyAssertionError, - scenario.run, - img_name, flavor_uuid, fakearg="fakearg") - scenario._boot_server.assert_called_with(img_name, flavor_uuid, - fakearg="fakearg") - scenario._list_servers.assert_called_with(details) - - def test_suspend_and_resume_server(self): - fake_server = object() - - scenario = servers.SuspendAndResumeServer(self.context) - scenario.generate_random_name = mock.MagicMock(return_value="name") - scenario._boot_server = mock.MagicMock(return_value=fake_server) - scenario._suspend_server = mock.MagicMock() - scenario._resume_server = 
mock.MagicMock() - scenario._delete_server = mock.MagicMock() - - scenario.run("img", 0, fakearg="fakearg") - - scenario._boot_server.assert_called_once_with("img", 0, - fakearg="fakearg") - - scenario._suspend_server.assert_called_once_with(fake_server) - scenario._resume_server.assert_called_once_with(fake_server) - scenario._delete_server.assert_called_once_with(fake_server, - force=False) - - def test_pause_and_unpause_server(self): - fake_server = object() - - scenario = servers.PauseAndUnpauseServer(self.context) - scenario.generate_random_name = mock.MagicMock(return_value="name") - scenario._boot_server = mock.MagicMock(return_value=fake_server) - scenario._pause_server = mock.MagicMock() - scenario._unpause_server = mock.MagicMock() - scenario._delete_server = mock.MagicMock() - - scenario.run("img", 0, fakearg="fakearg") - - scenario._boot_server.assert_called_once_with("img", 0, - fakearg="fakearg") - - scenario._pause_server.assert_called_once_with(fake_server) - scenario._unpause_server.assert_called_once_with(fake_server) - scenario._delete_server.assert_called_once_with(fake_server, - force=False) - - def test_shelve_and_unshelve_server(self): - fake_server = mock.MagicMock() - scenario = servers.ShelveAndUnshelveServer(self.context) - scenario._boot_server = mock.MagicMock(return_value=fake_server) - scenario._shelve_server = mock.MagicMock() - scenario._unshelve_server = mock.MagicMock() - scenario._delete_server = mock.MagicMock() - - scenario.run("img", 0, fakearg="fakearg") - - scenario._boot_server.assert_called_once_with("img", 0, - fakearg="fakearg") - - scenario._shelve_server.assert_called_once_with(fake_server) - scenario._unshelve_server.assert_called_once_with(fake_server) - scenario._delete_server.assert_called_once_with(fake_server, - force=False) - - def test_list_servers(self): - scenario = servers.ListServers(self.context) - scenario._list_servers = mock.MagicMock() - scenario.run(True) - scenario._list_servers.assert_called_once_with(True) - - @mock.patch("rally.plugins.openstack.services.storage.block.BlockStorage") - def test_boot_server_from_volume(self, mock_block_storage): - fake_server = object() - scenario = servers.BootServerFromVolume( - self.context, clients=mock.Mock()) - scenario._boot_server = mock.MagicMock(return_value=fake_server) - - fake_volume = fakes.FakeVolumeManager().create() - fake_volume.id = "volume_id" - cinder = mock_block_storage.return_value - cinder.create_volume.return_value = fake_volume - - scenario.run("img", 0, 5, volume_type=None, - auto_assign_nic=False, fakearg="f") - - cinder.create_volume.assert_called_once_with(5, imageRef="img", - volume_type=None) - scenario._boot_server.assert_called_once_with( - None, 0, auto_assign_nic=False, - block_device_mapping={"vda": "volume_id:::1"}, - fakearg="f") - - @mock.patch("rally.plugins.openstack.services.storage.block.BlockStorage") - def test_boot_server_from_volume_and_delete(self, mock_block_storage): - fake_server = object() - scenario = servers.BootServerFromVolumeAndDelete( - self.context, clients=mock.Mock()) - scenario._boot_server = mock.MagicMock(return_value=fake_server) - scenario.sleep_between = mock.MagicMock() - scenario._delete_server = mock.MagicMock() - - fake_volume = fakes.FakeVolumeManager().create() - fake_volume.id = "volume_id" - cinder = mock_block_storage.return_value - cinder.create_volume.return_value = fake_volume - - scenario.run("img", 0, 5, None, 10, 20, fakearg="f") - - cinder.create_volume.assert_called_once_with(5, imageRef="img", - 
volume_type=None) - scenario._boot_server.assert_called_once_with( - None, 0, - block_device_mapping={"vda": "volume_id:::1"}, - fakearg="f") - scenario.sleep_between.assert_called_once_with(10, 20) - scenario._delete_server.assert_called_once_with(fake_server, - force=False) - - def _prepare_boot(self, nic=None, assert_nic=False): - fake_server = mock.MagicMock() - - scenario = servers.BootServer(self.context) - - scenario._boot_server = mock.MagicMock(return_value=fake_server) - scenario.generate_random_name = mock.MagicMock(return_value="name") - - kwargs = {"fakearg": "f"} - expected_kwargs = {"fakearg": "f"} - - assert_nic = nic or assert_nic - if nic: - kwargs["nics"] = nic - if assert_nic: - self.clients("nova").networks.create("net-1") - expected_kwargs["nics"] = nic or [{"net-id": "net-2"}] - - return scenario, kwargs, expected_kwargs - - def _verify_boot_server(self, nic=None, assert_nic=False): - scenario, kwargs, expected_kwargs = self._prepare_boot( - nic=nic, assert_nic=assert_nic) - - scenario.run("img", 0, **kwargs) - scenario._boot_server.assert_called_once_with( - "img", 0, auto_assign_nic=False, **expected_kwargs) - - def test_boot_server_no_nics(self): - self._verify_boot_server(nic=None, assert_nic=False) - - def test_boot_server_with_nic(self): - self._verify_boot_server(nic=[{"net-id": "net-1"}], assert_nic=True) - - def test_snapshot_server(self): - fake_server = object() - fake_image = fakes.FakeImageManager()._create() - fake_image.id = "image_id" - - scenario = servers.SnapshotServer(self.context) - scenario.generate_random_name = mock.MagicMock(return_value="name") - scenario._boot_server = mock.MagicMock(return_value=fake_server) - scenario._create_image = mock.MagicMock(return_value=fake_image) - scenario._delete_server = mock.MagicMock() - scenario._delete_image = mock.MagicMock() - - scenario.run("i", 0, fakearg=2) - - scenario._boot_server.assert_has_calls([ - mock.call("i", 0, fakearg=2), - mock.call("image_id", 0, fakearg=2)]) - scenario._create_image.assert_called_once_with(fake_server) - scenario._delete_server.assert_has_calls([ - mock.call(fake_server, force=False), - mock.call(fake_server, force=False)]) - scenario._delete_image.assert_called_once_with(fake_image) - - def _test_resize(self, confirm=False): - fake_server = object() - fake_image = fakes.FakeImageManager()._create() - fake_image.id = "image_id" - flavor = mock.MagicMock() - to_flavor = mock.MagicMock() - - scenario = servers.ResizeServer(self.context) - scenario.generate_random_name = mock.MagicMock(return_value="name") - scenario._boot_server = mock.MagicMock(return_value=fake_server) - scenario._resize_confirm = mock.MagicMock() - scenario._resize_revert = mock.MagicMock() - scenario._resize = mock.MagicMock() - scenario._delete_server = mock.MagicMock() - - kwargs = {"confirm": confirm} - scenario.run(fake_image, flavor, to_flavor, **kwargs) - - scenario._resize.assert_called_once_with(fake_server, to_flavor) - - if confirm: - scenario._resize_confirm.assert_called_once_with(fake_server) - else: - scenario._resize_revert.assert_called_once_with(fake_server) - - def test_resize_with_confirm(self): - self._test_resize(confirm=True) - - def test_resize_with_revert(self): - self._test_resize(confirm=False) - - @ddt.data({"confirm": True}, - {"confirm": False}) - @ddt.unpack - def test_resize_shoutoff_server(self, confirm=False): - fake_server = object() - flavor = mock.MagicMock() - to_flavor = mock.MagicMock() - - scenario = servers.ResizeShutoffServer(self.context) - 
scenario.generate_random_name = mock.MagicMock(return_value="name") - scenario._boot_server = mock.MagicMock(return_value=fake_server) - scenario._stop_server = mock.MagicMock() - scenario._resize_confirm = mock.MagicMock() - scenario._resize_revert = mock.MagicMock() - scenario._resize = mock.MagicMock() - scenario._delete_server = mock.MagicMock() - - scenario.run("img", flavor, to_flavor, confirm=confirm) - - scenario._boot_server.assert_called_once_with("img", flavor) - scenario._stop_server.assert_called_once_with(fake_server) - scenario._resize.assert_called_once_with(fake_server, to_flavor) - - if confirm: - scenario._resize_confirm.assert_called_once_with(fake_server, - "SHUTOFF") - else: - scenario._resize_revert.assert_called_once_with(fake_server, - "SHUTOFF") - - scenario._delete_server.assert_called_once_with(fake_server, - force=False) - - @ddt.data({"confirm": True, "do_delete": True}, - {"confirm": False, "do_delete": True}) - @ddt.unpack - @mock.patch("rally.plugins.openstack.services.storage.block.BlockStorage") - def test_boot_server_attach_created_volume_and_resize( - self, mock_block_storage, confirm=False, do_delete=False): - fake_volume = mock.MagicMock() - fake_server = mock.MagicMock() - flavor = mock.MagicMock() - to_flavor = mock.MagicMock() - fake_attachment = mock.MagicMock() - - cinder = mock_block_storage.return_value - cinder.create_volume.return_value = fake_volume - - scenario = servers.BootServerAttachCreatedVolumeAndResize( - self.context, clients=mock.Mock()) - scenario.generate_random_name = mock.MagicMock(return_value="name") - scenario._boot_server = mock.MagicMock(return_value=fake_server) - scenario._attach_volume = mock.MagicMock(return_value=fake_attachment) - scenario._resize_confirm = mock.MagicMock() - scenario._resize_revert = mock.MagicMock() - scenario._resize = mock.MagicMock() - scenario._detach_volume = mock.MagicMock() - scenario._delete_server = mock.MagicMock() - scenario.sleep_between = mock.MagicMock() - - volume_size = 10 - scenario.run("img", flavor, to_flavor, volume_size, min_sleep=10, - max_sleep=20, confirm=confirm, do_delete=do_delete) - - scenario._boot_server.assert_called_once_with("img", flavor) - cinder.create_volume.assert_called_once_with(volume_size) - scenario._attach_volume.assert_called_once_with(fake_server, - fake_volume) - scenario._detach_volume.assert_called_once_with(fake_server, - fake_volume) - scenario.sleep_between.assert_called_once_with(10, 20) - scenario._resize.assert_called_once_with(fake_server, to_flavor) - - if confirm: - scenario._resize_confirm.assert_called_once_with(fake_server) - else: - scenario._resize_revert.assert_called_once_with(fake_server) - - if do_delete: - scenario._detach_volume.assert_called_once_with(fake_server, - fake_volume) - cinder.delete_volume.assert_called_once_with(fake_volume) - scenario._delete_server.assert_called_once_with(fake_server, - force=False) - - @mock.patch("rally.plugins.openstack.services.storage.block.BlockStorage") - def test_list_attachments(self, mock_block_storage): - mock_volume_service = mock_block_storage.return_value - fake_volume = mock.MagicMock() - fake_server = mock.MagicMock() - flavor = mock.MagicMock() - fake_attachment = mock.MagicMock() - list_attachments = [mock.MagicMock(), - fake_attachment, - mock.MagicMock()] - context = self.context - context.update({ - "admin": { - "id": "fake_user_id", - "credential": mock.MagicMock() - }, - "user": {"id": "fake_user_id", - "credential": mock.MagicMock()}, - "tenant": {"id": "fake", "name": 
"fake", - "volumes": [{"id": "uuid", "size": 1}], - "servers": [1]}}) - scenario = servers.BootServerAttachVolumeAndListAttachments( - context) - scenario._boot_server = mock.MagicMock(return_value=fake_server) - scenario._attach_volume = mock.MagicMock() - scenario._list_attachments = mock.MagicMock() - mock_volume_service.create_volume.return_value = fake_volume - scenario._list_attachments.return_value = list_attachments - - img_name = "img" - volume_size = 10 - volume_num = 1 - - scenario._attach_volume.return_value = fake_attachment - scenario.run(img_name, flavor, volume_size, volume_num) - - scenario._boot_server.assert_called_once_with(img_name, flavor) - mock_volume_service.create_volume.assert_called_once_with(volume_size) - scenario._attach_volume.assert_called_once_with(fake_server, - fake_volume) - scenario._list_attachments.assert_called_once_with(fake_server.id) - - @mock.patch("rally.plugins.openstack.services.storage.block.BlockStorage") - def test_list_attachments_fails(self, mock_block_storage): - mock_volume_service = mock_block_storage.return_value - fake_volume = mock.MagicMock() - fake_server = mock.MagicMock() - flavor = mock.MagicMock() - fake_attachment = mock.MagicMock() - list_attachments = [mock.MagicMock(), - mock.MagicMock(), - mock.MagicMock()] - - context = self.context - context.update({ - "admin": { - "id": "fake_user_id", - "credential": mock.MagicMock() - }, - "user": {"id": "fake_user_id", - "credential": mock.MagicMock()}, - "tenant": {"id": "fake", "name": "fake", - "volumes": [{"id": "uuid", "size": 1}], - "servers": [1]}}) - scenario = servers.BootServerAttachVolumeAndListAttachments( - context) - scenario._boot_server = mock.MagicMock(return_value=fake_server) - mock_volume_service.create_volume.return_value = fake_volume - scenario._attach_volume = mock.MagicMock() - scenario._list_attachments = mock.MagicMock() - scenario._attach_volume.return_value = fake_attachment - scenario._list_attachments.return_value = list_attachments - - img_name = "img" - volume_size = 10 - - # Negative case: attachment not included into list of - # available attachments - self.assertRaises(rally_exceptions.RallyAssertionError, - scenario.run, - img_name, flavor, volume_size) - - scenario._boot_server.assert_called_with(img_name, flavor) - mock_volume_service.create_volume.assert_called_with(volume_size) - scenario._attach_volume.assert_called_with(fake_server, - fake_volume) - scenario._list_attachments.assert_called_with(fake_server.id) - - @ddt.data({"confirm": True, "do_delete": True}, - {"confirm": False, "do_delete": True}) - @ddt.unpack - @mock.patch("rally.plugins.openstack.services.storage.block.BlockStorage") - def test_boot_server_from_volume_and_resize( - self, mock_block_storage, confirm=False, do_delete=False): - fake_server = object() - flavor = mock.MagicMock() - to_flavor = mock.MagicMock() - scenario = servers.BootServerFromVolumeAndResize(self.context, - clients=mock.Mock()) - scenario._boot_server = mock.MagicMock(return_value=fake_server) - scenario.generate_random_name = mock.MagicMock(return_value="name") - scenario._resize_confirm = mock.MagicMock() - scenario._resize_revert = mock.MagicMock() - scenario._resize = mock.MagicMock() - scenario.sleep_between = mock.MagicMock() - scenario._delete_server = mock.MagicMock() - - fake_volume = fakes.FakeVolumeManager().create() - fake_volume.id = "volume_id" - cinder = mock_block_storage.return_value - cinder.create_volume.return_value = fake_volume - - volume_size = 10 - scenario.run("img", flavor, 
to_flavor, volume_size, min_sleep=10, - max_sleep=20, confirm=confirm, do_delete=do_delete) - - cinder.create_volume.assert_called_once_with(10, imageRef="img") - scenario._boot_server.assert_called_once_with( - None, flavor, - block_device_mapping={"vda": "volume_id:::1"}) - scenario.sleep_between.assert_called_once_with(10, 20) - scenario._resize.assert_called_once_with(fake_server, to_flavor) - - if confirm: - scenario._resize_confirm.assert_called_once_with(fake_server) - else: - scenario._resize_revert.assert_called_once_with(fake_server) - - if do_delete: - scenario._delete_server.assert_called_once_with(fake_server, - force=False) - - def test_boot_and_live_migrate_server(self): - fake_server = mock.MagicMock() - - scenario = servers.BootAndLiveMigrateServer(self.context) - scenario.generate_random_name = mock.MagicMock(return_value="name") - scenario._boot_server = mock.MagicMock(return_value=fake_server) - scenario.sleep_between = mock.MagicMock() - scenario._live_migrate = mock.MagicMock() - scenario._delete_server = mock.MagicMock() - - scenario.run("img", 0, min_sleep=10, max_sleep=20, fakearg="fakearg") - - scenario._boot_server.assert_called_once_with("img", 0, - fakearg="fakearg") - - scenario.sleep_between.assert_called_once_with(10, 20) - - scenario._live_migrate.assert_called_once_with(fake_server, - False, False) - scenario._delete_server.assert_called_once_with(fake_server) - - @mock.patch("rally.plugins.openstack.services.storage.block.BlockStorage") - def test_boot_server_from_volume_and_live_migrate(self, - mock_block_storage): - fake_server = mock.MagicMock() - - scenario = servers.BootServerFromVolumeAndLiveMigrate( - self.context, clients=mock.Mock()) - scenario.generate_random_name = mock.MagicMock(return_value="name") - scenario._boot_server = mock.MagicMock(return_value=fake_server) - scenario.sleep_between = mock.MagicMock() - scenario._live_migrate = mock.MagicMock() - scenario._delete_server = mock.MagicMock() - - fake_volume = fakes.FakeVolumeManager().create() - fake_volume.id = "volume_id" - cinder = mock_block_storage.return_value - cinder.create_volume.return_value = fake_volume - - scenario.run("img", 0, 5, volume_type=None, - min_sleep=10, max_sleep=20, fakearg="f") - - cinder.create_volume.assert_called_once_with(5, imageRef="img", - volume_type=None) - - scenario._boot_server.assert_called_once_with( - None, 0, - block_device_mapping={"vda": "volume_id:::1"}, - fakearg="f") - - scenario.sleep_between.assert_called_once_with(10, 20) - - scenario._live_migrate.assert_called_once_with(fake_server, - False, False) - scenario._delete_server.assert_called_once_with(fake_server, - force=False) - - @mock.patch("rally.plugins.openstack.services.storage.block.BlockStorage") - def test_boot_server_attach_created_volume_and_live_migrate( - self, mock_block_storage): - fake_volume = mock.MagicMock() - fake_server = mock.MagicMock() - fake_attachment = mock.MagicMock() - - clients = mock.Mock() - cinder = mock_block_storage.return_value - cinder.create_volume.return_value = fake_volume - - scenario = servers.BootServerAttachCreatedVolumeAndLiveMigrate( - self.context, clients=clients) - - scenario._attach_volume = mock.MagicMock(return_value=fake_attachment) - scenario._detach_volume = mock.MagicMock() - - scenario.sleep_between = mock.MagicMock() - - scenario._live_migrate = mock.MagicMock() - - scenario._boot_server = mock.MagicMock(return_value=fake_server) - scenario._delete_server = mock.MagicMock() - - image = "img" - flavor = "flavor" - size = 5 - 
boot_kwargs = {"some_var": "asd"} - scenario.run(image, flavor, size, min_sleep=10, max_sleep=20, - boot_server_kwargs=boot_kwargs) - scenario._boot_server.assert_called_once_with(image, flavor, - **boot_kwargs) - cinder.create_volume.assert_called_once_with(size) - scenario._attach_volume.assert_called_once_with(fake_server, - fake_volume) - scenario._detach_volume.assert_called_once_with(fake_server, - fake_volume) - scenario.sleep_between.assert_called_once_with(10, 20) - scenario._live_migrate.assert_called_once_with(fake_server, - False, False) - - cinder.delete_volume.assert_called_once_with(fake_volume) - scenario._delete_server.assert_called_once_with(fake_server) - - def _test_boot_and_migrate_server(self, confirm=False): - fake_server = mock.MagicMock() - - scenario = servers.BootAndMigrateServer(self.context) - scenario.generate_random_name = mock.MagicMock(return_value="name") - scenario._boot_server = mock.MagicMock(return_value=fake_server) - scenario._migrate = mock.MagicMock() - scenario._resize_confirm = mock.MagicMock() - scenario._resize_revert = mock.MagicMock() - scenario._delete_server = mock.MagicMock() - - kwargs = {"confirm": confirm} - scenario.run("img", 0, fakearg="fakearg", **kwargs) - - scenario._boot_server.assert_called_once_with("img", 0, - fakearg="fakearg", - confirm=confirm) - - scenario._migrate.assert_called_once_with(fake_server) - - if confirm: - scenario._resize_confirm.assert_called_once_with(fake_server, - status="ACTIVE") - else: - scenario._resize_revert.assert_called_once_with(fake_server, - status="ACTIVE") - - scenario._delete_server.assert_called_once_with(fake_server) - - def test_boot_and_migrate_server_with_confirm(self): - self._test_boot_and_migrate_server(confirm=True) - - def test_boot_and_migrate_server_with_revert(self): - self._test_boot_and_migrate_server(confirm=False) - - def test_boot_and_rebuild_server(self): - scenario = servers.BootAndRebuildServer(self.context) - scenario._boot_server = mock.Mock() - scenario._rebuild_server = mock.Mock() - scenario._delete_server = mock.Mock() - - from_image = "img1" - to_image = "img2" - flavor = "flavor" - scenario.run(from_image, to_image, flavor, fakearg="fakearg") - - scenario._boot_server.assert_called_once_with(from_image, flavor, - fakearg="fakearg") - server = scenario._boot_server.return_value - scenario._rebuild_server.assert_called_once_with(server, to_image) - scenario._delete_server.assert_called_once_with(server) - - def test_boot_and_show_server(self): - server = fakes.FakeServer() - image = fakes.FakeImage() - flavor = fakes.FakeFlavor() - - scenario = servers.BootAndShowServer(self.context) - scenario._boot_server = mock.MagicMock(return_value=server) - scenario._show_server = mock.MagicMock() - - scenario.run(image, flavor, fakearg="fakearg") - - scenario._boot_server.assert_called_once_with(image, flavor, - fakearg="fakearg") - scenario._show_server.assert_called_once_with(server) - - def test_boot_server_and_list_interfaces(self): - server = fakes.FakeServer() - image = fakes.FakeImage() - flavor = fakes.FakeFlavor() - - scenario = servers.BootServerAndListInterfaces(self.context) - scenario._boot_server = mock.MagicMock(return_value=server) - scenario._list_interfaces = mock.MagicMock() - - scenario.run(image, flavor, fakearg="fakearg") - - scenario._boot_server.assert_called_once_with(image, flavor, - fakearg="fakearg") - scenario._list_interfaces.assert_called_once_with(server) - - @ddt.data({"length": None}, - {"length": 10}) - @ddt.unpack - def 
test_boot_and_get_console_server(self, length): - server = fakes.FakeServer() - image = fakes.FakeImage() - flavor = fakes.FakeFlavor() - kwargs = {"fakearg": "fakearg"} - - scenario = servers.BootAndGetConsoleOutput(self.context) - scenario._boot_server = mock.MagicMock(return_value=server) - scenario._get_server_console_output = mock.MagicMock() - - scenario.run(image, flavor, length, **kwargs) - - scenario._boot_server.assert_called_once_with(image, flavor, - **kwargs) - scenario._get_server_console_output.assert_called_once_with(server, - length) - - def test_boot_and_get_console_url(self): - server = fakes.FakeServer() - image = fakes.FakeImage() - flavor = fakes.FakeFlavor() - kwargs = {"fakearg": "fakearg"} - - scenario = servers.BootAndGetConsoleUrl(self.context) - scenario._boot_server = mock.MagicMock(return_value=server) - scenario._get_console_url_server = mock.MagicMock() - - scenario.run(image, flavor, console_type="novnc", **kwargs) - - scenario._boot_server.assert_called_once_with(image, flavor, - **kwargs) - scenario._get_console_url_server.assert_called_once_with( - server, "novnc") - - @mock.patch(NOVA_SERVERS_MODULE + ".network_wrapper.wrap") - def test_boot_and_associate_floating_ip(self, mock_wrap): - scenario = servers.BootAndAssociateFloatingIp(self.context) - server = mock.Mock() - scenario._boot_server = mock.Mock(return_value=server) - scenario._associate_floating_ip = mock.Mock() - - image = "img" - flavor = "flavor" - scenario.run(image, flavor, fakearg="fakearg") - - scenario._boot_server.assert_called_once_with(image, flavor, - fakearg="fakearg") - net_wrap = mock_wrap.return_value - net_wrap.create_floating_ip.assert_called_once_with( - tenant_id=server.tenant_id) - scenario._associate_floating_ip.assert_called_once_with( - server, net_wrap.create_floating_ip.return_value["ip"]) - - @mock.patch(NOVA_SERVERS_MODULE + ".network_wrapper.wrap") - def test_boot_server_associate_and_dissociate_floating_ip(self, mock_wrap): - scenario = servers.BootServerAssociateAndDissociateFloatingIP( - self.context) - server = mock.Mock() - scenario._boot_server = mock.Mock(return_value=server) - scenario._associate_floating_ip = mock.Mock() - scenario._dissociate_floating_ip = mock.Mock() - - image = "img" - flavor = "flavor" - scenario.run(image, flavor, fakearg="fakearg") - - scenario._boot_server.assert_called_once_with(image, flavor, - fakearg="fakearg") - net_wrap = mock_wrap.return_value - net_wrap.create_floating_ip.assert_called_once_with( - tenant_id=server.tenant_id) - scenario._associate_floating_ip.assert_called_once_with( - server, net_wrap.create_floating_ip.return_value["ip"]) - scenario._dissociate_floating_ip.assert_called_once_with( - server, net_wrap.create_floating_ip.return_value["ip"]) - - def test_boot_and_update_server(self): - scenario = servers.BootAndUpdateServer(self.context) - scenario._boot_server = mock.Mock() - scenario._update_server = mock.Mock() - - scenario.run("img", "flavor", "desp", fakearg="fakearg") - scenario._boot_server.assert_called_once_with("img", "flavor", - fakearg="fakearg") - scenario._update_server.assert_called_once_with( - scenario._boot_server.return_value, "desp") - - def test_boot_server_and_attach_interface(self): - network_create_args = {"router:external": True} - subnet_create_args = {"allocation_pools": []} - subnet_cidr_start = "10.1.0.0/16" - boot_server_args = {} - net = mock.MagicMock() - subnet = mock.MagicMock() - server = mock.MagicMock() - - scenario = servers.BootServerAndAttachInterface(self.context) - 
scenario._get_or_create_network = mock.Mock(return_value=net) - scenario._create_subnet = mock.Mock(return_value=subnet) - scenario._boot_server = mock.Mock(return_value=server) - scenario._attach_interface = mock.Mock() - - scenario.run("image", "flavor", - network_create_args=network_create_args, - subnet_create_args=subnet_create_args, - subnet_cidr_start=subnet_cidr_start, - boot_server_args=boot_server_args) - - scenario._get_or_create_network.assert_called_once_with( - network_create_args) - scenario._create_subnet.assert_called_once_with(net, - subnet_create_args, - subnet_cidr_start) - scenario._boot_server.assert_called_once_with("image", "flavor", - **boot_server_args) - scenario._attach_interface.assert_called_once_with( - server, net_id=net["network"]["id"]) - - @mock.patch("rally.plugins.openstack.services.storage.block.BlockStorage") - def test_boot_server_from_volume_snapshot(self, mock_block_storage): - fake_volume = mock.MagicMock(id="volume_id") - fake_snapshot = mock.MagicMock(id="snapshot_id") - - cinder = mock_block_storage.return_value - cinder.create_volume.return_value = fake_volume - cinder.create_snapshot.return_value = fake_snapshot - - scenario = servers.BootServerFromVolumeSnapshot(self.context, - clients=mock.Mock()) - scenario._boot_server = mock.MagicMock() - - scenario.run("img", "flavor", 1, volume_type=None, - auto_assign_nic=False, fakearg="f") - - cinder.create_volume.assert_called_once_with(1, imageRef="img", - volume_type=None) - cinder.create_snapshot.assert_called_once_with("volume_id", - force=False) - scenario._boot_server.assert_called_once_with( - None, "flavor", auto_assign_nic=False, - block_device_mapping={"vda": "snapshot_id:snap::1"}, - fakearg="f") diff --git a/tests/unit/plugins/openstack/scenarios/nova/test_services.py b/tests/unit/plugins/openstack/scenarios/nova/test_services.py deleted file mode 100644 index 16adae32a7..0000000000 --- a/tests/unit/plugins/openstack/scenarios/nova/test_services.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2016 IBM Corp. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.plugins.openstack.scenarios.nova import services -from tests.unit import test - - -class NovaServicesTestCase(test.TestCase): - - def test_list_services(self): - scenario = services.ListServices() - scenario._list_services = mock.Mock() - scenario.run(host="foo_host", binary="foo_hypervisor") - scenario._list_services.assert_called_once_with("foo_host", - "foo_hypervisor") diff --git a/tests/unit/plugins/openstack/scenarios/nova/test_utils.py b/tests/unit/plugins/openstack/scenarios/nova/test_utils.py deleted file mode 100644 index 7bbef8f592..0000000000 --- a/tests/unit/plugins/openstack/scenarios/nova/test_utils.py +++ /dev/null @@ -1,1240 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import mock - -from rally.common import cfg -from rally import exceptions as rally_exceptions -from rally.plugins.openstack.scenarios.nova import utils -from tests.unit import fakes -from tests.unit import test - -BM_UTILS = "rally.task.utils" -NOVA_UTILS = "rally.plugins.openstack.scenarios.nova.utils" -CONF = cfg.CONF - - -@ddt.ddt -class NovaScenarioTestCase(test.ScenarioTestCase): - - def setUp(self): - super(NovaScenarioTestCase, self).setUp() - self.server = mock.Mock() - self.server1 = mock.Mock() - self.volume = mock.Mock() - self.floating_ip = mock.Mock() - self.image = mock.Mock() - self.context.update( - {"user": {"id": "fake_user_id", "credential": mock.MagicMock()}, - "tenant": {"id": "fake_tenant"}}) - - def _context_with_secgroup(self, secgroup): - retval = {"user": {"secgroup": secgroup, - "credential": mock.MagicMock()}} - retval.update(self.context) - return retval - - def test__list_servers(self): - servers_list = [] - self.clients("nova").servers.list.return_value = servers_list - nova_scenario = utils.NovaScenario(self.context) - return_servers_list = nova_scenario._list_servers(True) - self.assertEqual(servers_list, return_servers_list) - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.list_servers") - - def test__pick_random_nic(self): - context = {"tenant": {"networks": [{"id": "net_id_1"}, - {"id": "net_id_2"}]}, - "iteration": 0} - nova_scenario = utils.NovaScenario(context=context) - nic1 = nova_scenario._pick_random_nic() - self.assertEqual(nic1, [{"net-id": "net_id_1"}]) - - context["iteration"] = 1 - nova_scenario = utils.NovaScenario(context=context) - nic2 = nova_scenario._pick_random_nic() - # balance to net 2 - self.assertEqual(nic2, [{"net-id": "net_id_2"}]) - - context["iteration"] = 2 - nova_scenario = utils.NovaScenario(context=context) - nic3 = nova_scenario._pick_random_nic() - # balance again, get net 1 - self.assertEqual(nic3, [{"net-id": "net_id_1"}]) - - @ddt.data( - {}, - {"kwargs": {"auto_assign_nic": True}}, - {"kwargs": {"auto_assign_nic": True, "nics": [{"net-id": "baz_id"}]}}, - {"context": {"user": {"secgroup": {"name": "test"}}}}, - {"context": {"user": {"secgroup": {"name": "new8"}}}, - "kwargs": {"security_groups": ["test8"]}}, - {"context": {"user": {"secgroup": {"name": "test1"}}}, - "kwargs": {"security_groups": ["test1"]}}, - ) - @ddt.unpack - def test__boot_server(self, context=None, kwargs=None): - self.clients("nova").servers.create.return_value = self.server - - if context is None: - context = self.context - context.setdefault("user", {}).setdefault("credential", - mock.MagicMock()) - context.setdefault("config", {}) - - nova_scenario = utils.NovaScenario(context=context) - nova_scenario.generate_random_name = mock.Mock() - nova_scenario._pick_random_nic = mock.Mock() - if kwargs is None: - kwargs = {} - kwargs["fakearg"] = "fakearg" - return_server = nova_scenario._boot_server("image_id", "flavor_id", - **kwargs) - self.mock_wait_for_status.mock.assert_called_once_with( - self.server, - ready_statuses=["ACTIVE"], - update_resource=self.mock_get_from_manager.mock.return_value, - 
check_interval=CONF.openstack.nova_server_boot_poll_interval, - timeout=CONF.openstack.nova_server_boot_timeout) - self.mock_get_from_manager.mock.assert_called_once_with() - self.assertEqual(self.mock_wait_for_status.mock.return_value, - return_server) - - expected_kwargs = {"fakearg": "fakearg"} - if "nics" in kwargs: - expected_kwargs["nics"] = kwargs["nics"] - elif "auto_assign_nic" in kwargs: - expected_kwargs["nics"] = (nova_scenario._pick_random_nic. - return_value) - - expected_secgroups = set() - if "security_groups" in kwargs: - expected_secgroups.update(kwargs["security_groups"]) - if "secgroup" in context["user"]: - expected_secgroups.add(context["user"]["secgroup"]["name"]) - if expected_secgroups: - expected_kwargs["security_groups"] = list(expected_secgroups) - - self.clients("nova").servers.create.assert_called_once_with( - nova_scenario.generate_random_name.return_value, - "image_id", "flavor_id", **expected_kwargs) - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.boot_server") - - def test__boot_server_with_network_exception(self): - self.context.update({"tenant": {"networks": None}}) - - self.clients("nova").servers.create.return_value = self.server - - nova_scenario = utils.NovaScenario( - context=self.context) - self.assertRaises(TypeError, nova_scenario._boot_server, - "image_id", "flavor_id", - auto_assign_nic=True) - - def test__suspend_server(self): - nova_scenario = utils.NovaScenario(context=self.context) - nova_scenario._suspend_server(self.server) - self.server.suspend.assert_called_once_with() - self.mock_wait_for_status.mock.assert_called_once_with( - self.server, - ready_statuses=["SUSPENDED"], - update_resource=self.mock_get_from_manager.mock.return_value, - check_interval=CONF.openstack.nova_server_suspend_poll_interval, - timeout=CONF.openstack.nova_server_suspend_timeout) - self.mock_get_from_manager.mock.assert_called_once_with() - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.suspend_server") - - def test__resume_server(self): - nova_scenario = utils.NovaScenario(context=self.context) - nova_scenario._resume_server(self.server) - self.server.resume.assert_called_once_with() - self.mock_wait_for_status.mock.assert_called_once_with( - self.server, - ready_statuses=["ACTIVE"], - update_resource=self.mock_get_from_manager.mock.return_value, - check_interval=CONF.openstack.nova_server_resume_poll_interval, - timeout=CONF.openstack.nova_server_resume_timeout) - self.mock_get_from_manager.mock.assert_called_once_with() - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.resume_server") - - def test__pause_server(self): - nova_scenario = utils.NovaScenario(context=self.context) - nova_scenario._pause_server(self.server) - self.server.pause.assert_called_once_with() - self.mock_wait_for_status.mock.assert_called_once_with( - self.server, - ready_statuses=["PAUSED"], - update_resource=self.mock_get_from_manager.mock.return_value, - check_interval=CONF.openstack.nova_server_pause_poll_interval, - timeout=CONF.openstack.nova_server_pause_timeout) - self.mock_get_from_manager.mock.assert_called_once_with() - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.pause_server") - - def test__unpause_server(self): - nova_scenario = utils.NovaScenario(context=self.context) - nova_scenario._unpause_server(self.server) - self.server.unpause.assert_called_once_with() - self.mock_wait_for_status.mock.assert_called_once_with( - self.server, - ready_statuses=["ACTIVE"], - 
update_resource=self.mock_get_from_manager.mock.return_value, - check_interval=CONF.openstack.nova_server_unpause_poll_interval, - timeout=CONF.openstack.nova_server_unpause_timeout) - self.mock_get_from_manager.mock.assert_called_once_with() - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.unpause_server") - - def test__shelve_server(self): - nova_scenario = utils.NovaScenario(context=self.context) - nova_scenario._shelve_server(self.server) - self.server.shelve.assert_called_once_with() - self.mock_wait_for_status.mock.assert_called_once_with( - self.server, - ready_statuses=["SHELVED_OFFLOADED"], - update_resource=self.mock_get_from_manager.mock.return_value, - check_interval=CONF.openstack.nova_server_shelve_poll_interval, - timeout=CONF.openstack.nova_server_shelve_timeout) - self.mock_get_from_manager.mock.assert_called_once_with() - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.shelve_server") - - def test__unshelve_server(self): - nova_scenario = utils.NovaScenario(context=self.context) - nova_scenario._unshelve_server(self.server) - self.server.unshelve.assert_called_once_with() - self.mock_wait_for_status.mock.assert_called_once_with( - self.server, - ready_statuses=["ACTIVE"], - update_resource=self.mock_get_from_manager.mock.return_value, - check_interval=CONF.openstack.nova_server_unshelve_poll_interval, - timeout=CONF.openstack.nova_server_unshelve_timeout) - self.mock_get_from_manager.mock.assert_called_once_with() - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.unshelve_server") - - @mock.patch("rally.plugins.openstack.scenarios.nova.utils.image_service") - def test__create_image(self, mock_image_service): - glance = mock_image_service.Image.return_value - glance.get_image.return_value = self.image - nova_scenario = utils.NovaScenario(context=self.context) - return_image = nova_scenario._create_image(self.server) - self.mock_wait_for_status.mock.assert_called_once_with( - self.image, - ready_statuses=["ACTIVE"], - update_resource=glance.get_image, - check_interval=CONF.openstack. 
- nova_server_image_create_poll_interval, - timeout=CONF.openstack.nova_server_image_create_timeout) - self.assertEqual(self.mock_wait_for_status.mock.return_value, - return_image) - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.create_image") - - def test__default_delete_server(self): - nova_scenario = utils.NovaScenario(context=self.context) - nova_scenario._delete_server(self.server) - self.server.delete.assert_called_once_with() - self.mock_wait_for_status.mock.assert_called_once_with( - self.server, - ready_statuses=["deleted"], - check_deletion=True, - update_resource=self.mock_get_from_manager.mock.return_value, - check_interval=CONF.openstack.nova_server_delete_poll_interval, - timeout=CONF.openstack.nova_server_delete_timeout) - self.mock_get_from_manager.mock.assert_called_once_with() - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.delete_server") - - def test__force_delete_server(self): - nova_scenario = utils.NovaScenario(context=self.context) - nova_scenario._delete_server(self.server, force=True) - self.server.force_delete.assert_called_once_with() - self.mock_wait_for_status.mock.assert_called_once_with( - self.server, - ready_statuses=["deleted"], - check_deletion=True, - update_resource=self.mock_get_from_manager.mock.return_value, - check_interval=CONF.openstack.nova_server_delete_poll_interval, - timeout=CONF.openstack.nova_server_delete_timeout) - self.mock_get_from_manager.mock.assert_called_once_with() - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.force_delete_server") - - def test__reboot_server(self): - nova_scenario = utils.NovaScenario(context=self.context) - nova_scenario._reboot_server(self.server) - self.server.reboot.assert_called_once_with(reboot_type="HARD") - self.mock_wait_for_status.mock.assert_called_once_with( - self.server, - ready_statuses=["ACTIVE"], - update_resource=self.mock_get_from_manager.mock.return_value, - check_interval=CONF.openstack.nova_server_reboot_poll_interval, - timeout=CONF.openstack.nova_server_reboot_timeout) - self.mock_get_from_manager.mock.assert_called_once_with() - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.reboot_server") - - def test__soft_reboot_server(self): - nova_scenario = utils.NovaScenario(context=self.context) - nova_scenario._soft_reboot_server(self.server) - self.server.reboot.assert_called_once_with(reboot_type="SOFT") - self.mock_wait_for_status.mock.assert_called_once_with( - self.server, - ready_statuses=["ACTIVE"], - update_resource=self.mock_get_from_manager.mock.return_value, - check_interval=CONF.openstack.nova_server_reboot_poll_interval, - timeout=CONF.openstack.nova_server_reboot_timeout) - self.mock_get_from_manager.mock.assert_called_once_with() - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.soft_reboot_server") - - def test__rebuild_server(self): - nova_scenario = utils.NovaScenario(context=self.context) - nova_scenario._rebuild_server(self.server, "img", fakearg="fakearg") - self.server.rebuild.assert_called_once_with("img", fakearg="fakearg") - self.mock_wait_for_status.mock.assert_called_once_with( - self.server, - ready_statuses=["ACTIVE"], - update_resource=self.mock_get_from_manager.mock.return_value, - check_interval=CONF.openstack.nova_server_rebuild_poll_interval, - timeout=CONF.openstack.nova_server_rebuild_timeout) - self.mock_get_from_manager.mock.assert_called_once_with() - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - 
"nova.rebuild_server") - - def test__start_server(self): - nova_scenario = utils.NovaScenario(context=self.context) - nova_scenario._start_server(self.server) - self.server.start.assert_called_once_with() - self.mock_wait_for_status.mock.assert_called_once_with( - self.server, - ready_statuses=["ACTIVE"], - update_resource=self.mock_get_from_manager.mock.return_value, - check_interval=CONF.openstack.nova_server_start_poll_interval, - timeout=CONF.openstack.nova_server_start_timeout) - self.mock_get_from_manager.mock.assert_called_once_with() - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.start_server") - - def test__stop_server(self): - nova_scenario = utils.NovaScenario(context=self.context) - nova_scenario._stop_server(self.server) - self.server.stop.assert_called_once_with() - self.mock_wait_for_status.mock.assert_called_once_with( - self.server, - ready_statuses=["SHUTOFF"], - update_resource=self.mock_get_from_manager.mock.return_value, - check_interval=CONF.openstack.nova_server_stop_poll_interval, - timeout=CONF.openstack.nova_server_stop_timeout) - self.mock_get_from_manager.mock.assert_called_once_with() - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.stop_server") - - def test__rescue_server(self): - nova_scenario = utils.NovaScenario(context=self.context) - nova_scenario._rescue_server(self.server) - self.server.rescue.assert_called_once_with() - self.mock_wait_for_status.mock.assert_called_once_with( - self.server, - ready_statuses=["RESCUE"], - update_resource=self.mock_get_from_manager.mock.return_value, - check_interval=CONF.openstack.nova_server_rescue_poll_interval, - timeout=CONF.openstack.nova_server_rescue_timeout) - self.mock_get_from_manager.mock.assert_called_once_with() - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.rescue_server") - - def test__unrescue_server(self): - nova_scenario = utils.NovaScenario(context=self.context) - nova_scenario._unrescue_server(self.server) - self.server.unrescue.assert_called_once_with() - self.mock_wait_for_status.mock.assert_called_once_with( - self.server, - ready_statuses=["ACTIVE"], - update_resource=self.mock_get_from_manager.mock.return_value, - check_interval=CONF.openstack.nova_server_unrescue_poll_interval, - timeout=CONF.openstack.nova_server_unrescue_timeout) - self.mock_get_from_manager.mock.assert_called_once_with() - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.unrescue_server") - - def _test_delete_servers(self, force=False): - servers = [self.server, self.server1] - nova_scenario = utils.NovaScenario(context=self.context) - nova_scenario._delete_servers(servers, force=force) - check_interval = CONF.openstack.nova_server_delete_poll_interval - expected = [] - for server in servers: - expected.append(mock.call( - server, - ready_statuses=["deleted"], - check_deletion=True, - update_resource=self.mock_get_from_manager.mock.return_value, - check_interval=check_interval, - timeout=CONF.openstack.nova_server_delete_timeout)) - if force: - server.force_delete.assert_called_once_with() - self.assertFalse(server.delete.called) - else: - server.delete.assert_called_once_with() - self.assertFalse(server.force_delete.called) - - self.mock_wait_for_status.mock.assert_has_calls(expected) - timer_name = "nova.%sdelete_servers" % ("force_" if force else "") - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - timer_name) - - def test__default_delete_servers(self): - self._test_delete_servers() - - def 
test__force_delete_servers(self): - self._test_delete_servers(force=True) - - @mock.patch("rally.plugins.openstack.scenarios.nova.utils.image_service") - def test__delete_image(self, mock_image_service): - glance = mock_image_service.Image.return_value - nova_scenario = utils.NovaScenario(context=self.context, - clients=mock.Mock()) - nova_scenario._delete_image(self.image) - glance.delete_image.assert_called_once_with(self.image.id) - self.mock_wait_for_status.mock.assert_called_once_with( - self.image, - ready_statuses=["deleted", "pending_delete"], - check_deletion=True, - update_resource=glance.get_image, - check_interval=CONF.openstack. - nova_server_image_delete_poll_interval, - timeout=CONF.openstack.nova_server_image_delete_timeout) - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.delete_image") - - @ddt.data( - {"requests": 1}, - {"requests": 25}, - {"requests": 2, "instances_amount": 100, "auto_assign_nic": True, - "fakearg": "fake"}, - {"auto_assign_nic": True, "nics": [{"net-id": "foo"}]}, - {"auto_assign_nic": False, "nics": [{"net-id": "foo"}]}) - @ddt.unpack - def test__boot_servers(self, image_id="image", flavor_id="flavor", - requests=1, instances_amount=1, - auto_assign_nic=False, **kwargs): - servers = [mock.Mock() for i in range(instances_amount)] - self.clients("nova").servers.list.return_value = servers - scenario = utils.NovaScenario(context=self.context) - scenario.generate_random_name = mock.Mock() - scenario._pick_random_nic = mock.Mock() - - scenario._boot_servers(image_id, flavor_id, requests, - instances_amount=instances_amount, - auto_assign_nic=auto_assign_nic, - **kwargs) - - expected_kwargs = dict(kwargs) - if auto_assign_nic and "nics" not in kwargs: - expected_kwargs["nics"] = scenario._pick_random_nic.return_value - - create_calls = [ - mock.call( - "%s_%d" % (scenario.generate_random_name.return_value, i), - image_id, flavor_id, - min_count=instances_amount, max_count=instances_amount, - **expected_kwargs) - for i in range(requests)] - self.clients("nova").servers.create.assert_has_calls(create_calls) - - wait_for_status_calls = [ - mock.call( - servers[i], - ready_statuses=["ACTIVE"], - update_resource=self.mock_get_from_manager.mock.return_value, - check_interval=CONF.openstack.nova_server_boot_poll_interval, - timeout=CONF.openstack.nova_server_boot_timeout) - for i in range(instances_amount)] - self.mock_wait_for_status.mock.assert_has_calls(wait_for_status_calls) - - self.mock_get_from_manager.mock.assert_has_calls( - [mock.call() for i in range(instances_amount)]) - self._test_atomic_action_timer(scenario.atomic_actions(), - "nova.boot_servers") - - def test__show_server(self): - nova_scenario = utils.NovaScenario(context=self.context) - nova_scenario._show_server(self.server) - self.clients("nova").servers.get.assert_called_once_with( - self.server - ) - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.show_server") - - def test__get_console_server(self): - nova_scenario = utils.NovaScenario(context=self.context) - nova_scenario._get_server_console_output(self.server) - self.clients( - "nova").servers.get_console_output.assert_called_once_with( - self.server, length=None) - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.get_console_output_server") - - def test__get_console_url(self): - nova_scenario = utils.NovaScenario(context=self.context) - nova_scenario._get_console_url_server(self.server, "foo") - self.clients( - "nova").servers.get_console_url.assert_called_once_with( - 
self.server, "foo") - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.get_console_url_server") - - def test__associate_floating_ip(self): - nova_scenario = utils.NovaScenario(context=self.context) - neutronclient = nova_scenario.clients("neutron") - neutronclient.list_ports.return_value = {"ports": [{"id": "p1"}, - {"id": "p2"}]} - - fip_ip = "172.168.0.1" - fip_id = "some" - # case #1- an object from neutronclient - floating_ip = {"floating_ip_address": fip_ip, "id": fip_id} - - nova_scenario._associate_floating_ip(self.server, floating_ip) - - neutronclient.update_floatingip.assert_called_once_with( - fip_id, {"floatingip": {"port_id": "p1"}} - ) - # case #2 - an object from network wrapper - neutronclient.update_floatingip.reset_mock() - floating_ip = {"ip": fip_ip, "id": fip_id} - - nova_scenario._associate_floating_ip(self.server, floating_ip) - - neutronclient.update_floatingip.assert_called_once_with( - fip_id, {"floatingip": {"port_id": "p1"}} - ) - - # these should not be called in both cases - self.assertFalse(neutronclient.list_floatingips.called) - # it is an old behavior. let's check that it was not called - self.assertFalse(self.server.add_floating_ip.called) - - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.associate_floating_ip", count=2) - - def test__associate_floating_ip_deprecated_behavior(self): - nova_scenario = utils.NovaScenario(context=self.context) - neutronclient = nova_scenario.clients("neutron") - neutronclient.list_ports.return_value = {"ports": [{"id": "p1"}, - {"id": "p2"}]} - - fip_id = "fip1" - fip_ip = "172.168.0.1" - neutronclient.list_floatingips.return_value = { - "floatingips": [ - {"id": fip_id, "floating_ip_address": fip_ip}, - {"id": "fip2", "floating_ip_address": "127.0.0.1"}]} - - nova_scenario._associate_floating_ip(self.server, fip_ip) - - neutronclient.update_floatingip.assert_called_once_with( - fip_id, {"floatingip": {"port_id": "p1"}} - ) - - neutronclient.list_floatingips.assert_called_once_with( - tenant_id="fake_tenant") - - # it is an old behavior. let's check that it was not called - self.assertFalse(self.server.add_floating_ip.called) - - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.associate_floating_ip") - - def test__dissociate_floating_ip(self): - nova_scenario = utils.NovaScenario(context=self.context) - neutronclient = nova_scenario.clients("neutron") - - fip_ip = "172.168.0.1" - fip_id = "some" - # case #1- an object from neutronclient - floating_ip = {"floating_ip_address": fip_ip, "id": fip_id} - - nova_scenario._dissociate_floating_ip(self.server, floating_ip) - - neutronclient.update_floatingip.assert_called_once_with( - fip_id, {"floatingip": {"port_id": None}} - ) - # case #2 - an object from network wrapper - neutronclient.update_floatingip.reset_mock() - floating_ip = {"ip": fip_ip, "id": fip_id} - - nova_scenario._dissociate_floating_ip(self.server, floating_ip) - - neutronclient.update_floatingip.assert_called_once_with( - fip_id, {"floatingip": {"port_id": None}} - ) - - # these should not be called in both cases - self.assertFalse(neutronclient.list_floatingips.called) - # it is an old behavior. 
let's check that it was not called - self.assertFalse(self.server.add_floating_ip.called) - - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.dissociate_floating_ip", count=2) - - def test__disassociate_floating_ip_deprecated_behavior(self): - nova_scenario = utils.NovaScenario(context=self.context) - neutronclient = nova_scenario.clients("neutron") - - fip_id = "fip1" - fip_ip = "172.168.0.1" - neutronclient.list_floatingips.return_value = { - "floatingips": [ - {"id": fip_id, "floating_ip_address": fip_ip}, - {"id": "fip2", "floating_ip_address": "127.0.0.1"}]} - - nova_scenario._dissociate_floating_ip(self.server, fip_ip) - - neutronclient.update_floatingip.assert_called_once_with( - fip_id, {"floatingip": {"port_id": None}} - ) - - neutronclient.list_floatingips.assert_called_once_with( - tenant_id="fake_tenant") - - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.dissociate_floating_ip") - - def test__check_ip_address(self): - nova_scenario = utils.NovaScenario(context=self.context) - fake_server = fakes.FakeServerManager().create("test_server", - "image_id_01", - "flavor_id_01") - fake_server.addresses = { - "private": [ - {"version": 4, "addr": "1.2.3.4"}, - ]} - floating_ip = fakes.FakeFloatingIP() - floating_ip.ip = "10.20.30.40" - - # Also test function check_ip_address accept a string as attr - self.assertFalse( - nova_scenario.check_ip_address(floating_ip.ip)(fake_server)) - self.assertTrue( - nova_scenario.check_ip_address(floating_ip.ip, must_exist=False) - (fake_server)) - - fake_server.addresses["private"].append( - {"version": 4, "addr": floating_ip.ip} - ) - # Also test function check_ip_address accept an object with attr ip - self.assertTrue( - nova_scenario.check_ip_address(floating_ip) - (fake_server)) - self.assertFalse( - nova_scenario.check_ip_address(floating_ip, must_exist=False) - (fake_server)) - - def test__resize(self): - nova_scenario = utils.NovaScenario(context=self.context) - to_flavor = mock.Mock() - nova_scenario._resize(self.server, to_flavor) - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.resize") - - def test__resize_confirm(self): - nova_scenario = utils.NovaScenario(context=self.context) - nova_scenario._resize_confirm(self.server) - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.resize_confirm") - - @ddt.data({}, - {"status": "SHUTOFF"}) - @ddt.unpack - def test__resize_revert(self, status=None): - nova_scenario = utils.NovaScenario(context=self.context) - if status is None: - nova_scenario._resize_revert(self.server) - status = "ACTIVE" - else: - nova_scenario._resize_revert(self.server, status=status) - self.mock_wait_for_status.mock.assert_called_once_with( - self.server, - ready_statuses=[status], - update_resource=self.mock_get_from_manager.mock.return_value, - check_interval=CONF.openstack. 
- nova_server_resize_revert_poll_interval, - timeout=CONF.openstack.nova_server_resize_revert_timeout) - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.resize_revert") - - @mock.patch("rally.plugins.openstack.services.storage.block.BlockStorage") - def test__update_volume_resource(self, mock_block_storage): - volume = fakes.FakeVolume(id=1) - cinder = mock_block_storage.return_value - cinder.get_volume = mock.MagicMock() - nova_scenario = utils.NovaScenario(context=self.context) - self.assertEqual(cinder.get_volume.return_value, - nova_scenario._update_volume_resource(volume)) - - def test__attach_volume(self): - expect_attach = mock.MagicMock() - device = None - (self.clients("nova").volumes.create_server_volume - .return_value) = expect_attach - nova_scenario = utils.NovaScenario(context=self.context) - attach = nova_scenario._attach_volume(self.server, self.volume, device) - (self.clients("nova").volumes.create_server_volume - .assert_called_once_with(self.server.id, self.volume.id, device)) - self.assertEqual(expect_attach, attach) - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.attach_volume") - - def test__list_attachments(self): - expect_attachments = [mock.MagicMock()] - (self.clients("nova").volumes.get_server_volumes - .return_value) = expect_attachments - nova_scenario = utils.NovaScenario(context=self.context) - list_attachments = nova_scenario._list_attachments(self.server.id) - self.assertEqual(expect_attachments, list_attachments) - (self.clients("nova").volumes.get_server_volumes - .assert_called_once_with(self.server.id)) - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.list_attachments") - - def test__detach_volume(self): - attach = mock.MagicMock(id="attach_id") - self.clients("nova").volumes.delete_server_volume.return_value = None - nova_scenario = utils.NovaScenario(context=self.context) - nova_scenario._detach_volume(self.server, self.volume, attach) - (self.clients("nova").volumes.delete_server_volume - .assert_called_once_with(self.server.id, self.volume.id)) - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.detach_volume") - - def test__detach_volume_no_attach(self): - self.clients("nova").volumes.delete_server_volume.return_value = None - nova_scenario = utils.NovaScenario(context=self.context) - nova_scenario._detach_volume(self.server, self.volume, None) - (self.clients("nova").volumes.delete_server_volume - .assert_called_once_with(self.server.id, self.volume.id)) - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.detach_volume") - - def test__live_migrate_server(self): - self.admin_clients("nova").servers.get(return_value=self.server) - nova_scenario = utils.NovaScenario(context=self.context) - nova_scenario._live_migrate(self.server, - block_migration=False, - disk_over_commit=False, - skip_host_check=True) - - self.mock_wait_for_status.mock.assert_called_once_with( - self.server, - ready_statuses=["ACTIVE"], - update_resource=self.mock_get_from_manager.mock.return_value, - check_interval=CONF.openstack. 
- nova_server_live_migrate_poll_interval, - timeout=CONF.openstack.nova_server_live_migrate_timeout) - self.mock_get_from_manager.mock.assert_called_once_with() - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.live_migrate") - - def test__migrate_server(self): - fake_server = self.server - setattr(fake_server, "OS-EXT-SRV-ATTR:host", "a1") - self.clients("nova").servers.get(return_value=fake_server) - nova_scenario = utils.NovaScenario(context=self.context) - nova_scenario._migrate(fake_server, skip_host_check=True) - - self.mock_wait_for_status.mock.assert_called_once_with( - fake_server, - ready_statuses=["VERIFY_RESIZE"], - update_resource=self.mock_get_from_manager.mock.return_value, - check_interval=CONF.openstack.nova_server_migrate_poll_interval, - timeout=CONF.openstack.nova_server_migrate_timeout) - self.mock_get_from_manager.mock.assert_called_once_with() - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.migrate") - - self.assertRaises(rally_exceptions.RallyException, - nova_scenario._migrate, - fake_server, skip_host_check=False) - - def test__add_server_secgroups(self): - server = mock.Mock() - fake_secgroups = [fakes.FakeSecurityGroup(None, None, 1, "uuid1")] - - nova_scenario = utils.NovaScenario() - security_group = fake_secgroups[0] - result = nova_scenario._add_server_secgroups(server, - security_group.name) - self.assertEqual( - self.clients("nova").servers.add_security_group.return_value, - result) - (self.clients("nova").servers.add_security_group. - assert_called_once_with(server, security_group.name)) - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.add_server_secgroups") - - def test__list_keypairs(self): - nova_scenario = utils.NovaScenario() - result = nova_scenario._list_keypairs() - self.assertEqual(self.clients("nova").keypairs.list.return_value, - result) - self.clients("nova").keypairs.list.assert_called_once_with() - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.list_keypairs") - - def test__create_keypair(self): - nova_scenario = utils.NovaScenario() - nova_scenario.generate_random_name = mock.Mock( - return_value="rally_nova_keypair_fake") - result = nova_scenario._create_keypair(fakeargs="fakeargs") - self.assertEqual( - self.clients("nova").keypairs.create.return_value.name, - result) - self.clients("nova").keypairs.create.assert_called_once_with( - "rally_nova_keypair_fake", fakeargs="fakeargs") - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.create_keypair") - - def test__get_server_group(self): - nova_scenario = utils.NovaScenario() - fakeid = 12345 - result = nova_scenario._get_server_group(fakeid) - self.assertEqual( - self.clients("nova").server_groups.get.return_value, - result) - self.clients("nova").server_groups.get.assert_called_once_with( - fakeid) - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.get_server_group") - - def test__create_server_group(self): - nova_scenario = utils.NovaScenario() - nova_scenario.generate_random_name = mock.Mock( - return_value="random_name") - result = nova_scenario._create_server_group(fakeargs="fakeargs") - self.assertEqual( - self.clients("nova").server_groups.create.return_value, - result) - self.clients("nova").server_groups.create.assert_called_once_with( - name="random_name", fakeargs="fakeargs") - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.create_server_group") - - def test__delete_server_group(self): - nova_scenario = 
utils.NovaScenario() - fakeid = 12345 - result = nova_scenario._delete_server_group(fakeid) - self.assertEqual( - self.clients("nova").server_groups.delete.return_value, - result) - self.clients("nova").server_groups.delete.assert_called_once_with( - fakeid) - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.delete_server_group") - - def test__list_server_groups(self): - nova_scenario = utils.NovaScenario() - result1 = nova_scenario._list_server_groups(all_projects=False) - result2 = nova_scenario._list_server_groups(all_projects=True) - self.assertEqual(self.clients("nova").server_groups.list.return_value, - result1) - admcli = self.admin_clients("nova") - self.assertEqual(admcli.server_groups.list.return_value, result2) - self.clients("nova").server_groups.list.assert_called_once_with( - False) - self.admin_clients("nova").server_groups.list.assert_called_once_with( - True) - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.list_server_groups", count=2) - - def test__delete_keypair(self): - nova_scenario = utils.NovaScenario() - nova_scenario._delete_keypair("fake_keypair") - self.clients("nova").keypairs.delete.assert_called_once_with( - "fake_keypair") - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.delete_keypair") - - def test__get_keypair(self): - nova_scenario = utils.NovaScenario() - nova_scenario._get_keypair("fake_keypair") - self.clients("nova").keypairs.get.assert_called_once_with( - "fake_keypair") - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.get_keypair") - - def test__list_hypervisors(self): - nova_scenario = utils.NovaScenario() - result = nova_scenario._list_hypervisors(detailed=False) - self.assertEqual( - self.admin_clients("nova").hypervisors.list.return_value, result) - self.admin_clients("nova").hypervisors.list.assert_called_once_with( - False) - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.list_hypervisors") - - def test__statistics_hypervisors(self): - nova_scenario = utils.NovaScenario() - result = nova_scenario._statistics_hypervisors() - self.assertEqual( - self.admin_clients("nova").hypervisors.statistics.return_value, - result) - (self.admin_clients("nova").hypervisors.statistics. 
- assert_called_once_with()) - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.statistics_hypervisors") - - def test__get_hypervisor(self): - hypervisor = mock.Mock() - nova_scenario = utils.NovaScenario() - result = nova_scenario._get_hypervisor(hypervisor) - self.assertEqual( - self.admin_clients("nova").hypervisors.get.return_value, - result) - self.admin_clients("nova").hypervisors.get.assert_called_once_with( - hypervisor) - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.get_hypervisor") - - def test__search_hypervisors(self): - nova_scenario = utils.NovaScenario() - nova_scenario._search_hypervisors("fake_hostname", servers=False) - - self.admin_clients("nova").hypervisors.search.assert_called_once_with( - "fake_hostname", servers=False) - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.search_hypervisors") - - def test__list_interfaces(self): - nova_scenario = utils.NovaScenario() - result = nova_scenario._list_interfaces("server") - self.assertEqual( - self.clients("nova").servers.interface_list.return_value, - result) - self.clients("nova").servers.interface_list.assert_called_once_with( - "server") - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.list_interfaces") - - @mock.patch("rally.plugins.openstack.scenarios.nova.utils.image_service") - def test__list_images(self, mock_image_service): - result = utils.NovaScenario(clients=mock.Mock())._list_images() - glance = mock_image_service.Image.return_value - self.assertEqual(glance.list_images.return_value, result) - glance.list_images.assert_called_once_with() - - def test__lock_server(self): - server = mock.Mock() - nova_scenario = utils.NovaScenario(context=self.context) - nova_scenario._lock_server(server) - server.lock.assert_called_once_with() - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.lock_server") - - def test__unlock_server(self): - server = mock.Mock() - nova_scenario = utils.NovaScenario(context=self.context) - nova_scenario._unlock_server(server) - server.unlock.assert_called_once_with() - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.unlock_server") - - def test__delete_network(self): - nova_scenario = utils.NovaScenario() - result = nova_scenario._delete_network("fake_net_id") - self.assertEqual( - self.admin_clients("nova").networks.delete.return_value, - result) - self.admin_clients("nova").networks.delete.assert_called_once_with( - "fake_net_id") - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.delete_network") - - def test__list_flavors(self): - nova_scenario = utils.NovaScenario() - result = nova_scenario._list_flavors(detailed=True, fakearg="fakearg") - self.assertEqual(self.clients("nova").flavors.list.return_value, - result) - self.clients("nova").flavors.list.assert_called_once_with( - True, fakearg="fakearg") - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.list_flavors") - - def test__set_flavor_keys(self): - flavor = mock.MagicMock() - nova_scenario = utils.NovaScenario() - extra_specs = {"fakeargs": "foo"} - flavor.set_keys = mock.MagicMock() - - result = nova_scenario._set_flavor_keys(flavor, extra_specs) - self.assertEqual(flavor.set_keys.return_value, result) - flavor.set_keys.assert_called_once_with(extra_specs) - - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.set_flavor_keys") - - @ddt.data({}, - {"hypervisor": "foo_hypervisor"}) - @ddt.unpack - def test__list_agents(self, 
hypervisor=None): - nova_scenario = utils.NovaScenario() - result = nova_scenario._list_agents(hypervisor) - self.assertEqual( - self.admin_clients("nova").agents.list.return_value, result) - self.admin_clients("nova").agents.list.assert_called_once_with( - hypervisor) - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.list_agents") - - def test__list_aggregates(self): - nova_scenario = utils.NovaScenario() - result = nova_scenario._list_aggregates() - self.assertEqual( - self.admin_clients("nova").aggregates.list.return_value, result) - self.admin_clients("nova").aggregates.list.assert_called_once_with() - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.list_aggregates") - - def test__list_availability_zones(self): - nova_scenario = utils.NovaScenario() - result = nova_scenario._list_availability_zones(detailed=True) - self.assertEqual( - self.admin_clients("nova").availability_zones.list.return_value, - result) - avail_zones_client = self.admin_clients("nova").availability_zones - avail_zones_client.list.assert_called_once_with(True) - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.list_availability_zones") - - @ddt.data({}, - {"host": "foo_host"}, - {"binary": "foo_binary"}, - {"host": "foo_host", "binary": "foo_binary"}) - @ddt.unpack - def test__list_services(self, host=None, binary=None): - nova_scenario = utils.NovaScenario() - result = nova_scenario._list_services(host=host, binary=binary) - self.assertEqual(self.admin_clients("nova").services.list.return_value, - result) - self.admin_clients("nova").services.list.assert_called_once_with( - host, binary) - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.list_services") - - def test__list_flavor_access(self): - nova_scenario = utils.NovaScenario() - result = nova_scenario._list_flavor_access("foo_id") - self.assertEqual( - self.admin_clients("nova").flavor_access.list.return_value, - result) - self.admin_clients("nova").flavor_access.list.assert_called_once_with( - flavor="foo_id") - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.list_flavor_access") - - def test__add_tenant_access(self): - tenant = mock.Mock() - flavor = mock.Mock() - nova_scenario = utils.NovaScenario() - admin_clients = self.admin_clients("nova") - result = nova_scenario._add_tenant_access(flavor.id, tenant.id) - self.assertEqual( - admin_clients.flavor_access.add_tenant_access.return_value, - result) - admin_clients.flavor_access.add_tenant_access.assert_called_once_with( - flavor.id, tenant.id) - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.add_tenant_access") - - def test__create_flavor(self): - nova_scenario = utils.NovaScenario() - random_name = "random_name" - nova_scenario.generate_random_name = mock.Mock( - return_value=random_name) - result = nova_scenario._create_flavor(500, 1, 1, - fakearg="fakearg") - self.assertEqual( - self.admin_clients("nova").flavors.create.return_value, - result) - self.admin_clients("nova").flavors.create.assert_called_once_with( - random_name, 500, 1, 1, fakearg="fakearg") - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.create_flavor") - - def test__get_flavor(self): - nova_scenario = utils.NovaScenario() - result = nova_scenario._get_flavor("foo_flavor_id") - self.assertEqual( - self.admin_clients("nova").flavors.get.return_value, - result) - self.admin_clients("nova").flavors.get.assert_called_once_with( - "foo_flavor_id") - 
self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.get_flavor") - - def test__delete_flavor(self): - nova_scenario = utils.NovaScenario() - result = nova_scenario._delete_flavor("foo_flavor_id") - self.assertEqual( - self.admin_clients("nova").flavors.delete.return_value, - result) - self.admin_clients("nova").flavors.delete.assert_called_once_with( - "foo_flavor_id") - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.delete_flavor") - - def test__update_server(self): - server = mock.Mock() - nova_scenario = utils.NovaScenario() - nova_scenario.generate_random_name = mock.Mock( - return_value="new_name") - server.update = mock.Mock() - - result = nova_scenario._update_server(server) - self.assertEqual(result, server.update.return_value) - nova_scenario.generate_random_name.assert_called_once_with() - server.update.assert_called_once_with(name="new_name") - - nova_scenario.generate_random_name.reset_mock() - server.update.reset_mock() - - result = nova_scenario._update_server(server, - description="desp") - self.assertEqual(result, server.update.return_value) - nova_scenario.generate_random_name.assert_called_once_with() - server.update.assert_called_once_with(name="new_name", - description="desp") - - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.update_server", count=2) - - def test_create_aggregate(self): - nova_scenario = utils.NovaScenario(context=self.context) - random_name = "random_name" - nova_scenario.generate_random_name = mock.Mock( - return_value=random_name) - result = nova_scenario._create_aggregate("nova") - self.assertEqual( - self.admin_clients("nova").aggregates.create.return_value, - result) - self.admin_clients("nova").aggregates.create.assert_called_once_with( - random_name, "nova") - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.create_aggregate") - - def test_delete_aggregate(self): - nova_scenario = utils.NovaScenario(context=self.context) - result = nova_scenario._delete_aggregate("fake_aggregate") - self.assertEqual( - self.admin_clients("nova").aggregates.delete.return_value, - result) - self.admin_clients("nova").aggregates.delete.assert_called_once_with( - "fake_aggregate") - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.delete_aggregate") - - def test_get_aggregate_details(self): - nova_scenario = utils.NovaScenario(context=self.context) - result = nova_scenario._get_aggregate_details("fake_aggregate") - self.assertEqual( - self.admin_clients("nova").aggregates.get_details.return_value, - result) - self.admin_clients( - "nova").aggregates.get_details.assert_called_once_with( - "fake_aggregate") - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.get_aggregate_details") - - def test_update_aggregate(self): - aggregate = mock.Mock() - nova_scenario = utils.NovaScenario(context=self.context) - nova_scenario.generate_random_name = mock.Mock( - return_value="random_name") - values = {"name": "random_name", - "availability_zone": "random_name"} - result = nova_scenario._update_aggregate(aggregate=aggregate) - self.assertEqual( - self.admin_clients("nova").aggregates.update.return_value, - result) - self.admin_clients("nova").aggregates.update.assert_called_once_with( - aggregate, values) - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.update_aggregate") - - def test_aggregate_add_host(self): - nova_scenario = utils.NovaScenario(context=self.context) - result = nova_scenario._aggregate_add_host("fake_agg", 
"fake_host") - self.assertEqual( - self.admin_clients("nova").aggregates.add_host.return_value, - result) - self.admin_clients("nova").aggregates.add_host.assert_called_once_with( - "fake_agg", "fake_host") - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.aggregate_add_host") - - def test_aggregate_remove_host(self): - nova_scenario = utils.NovaScenario(context=self.context) - result = nova_scenario._aggregate_remove_host("fake_agg", "fake_host") - self.assertEqual( - self.admin_clients("nova").aggregates.remove_host.return_value, - result) - self.admin_clients( - "nova").aggregates.remove_host.assert_called_once_with( - "fake_agg", "fake_host") - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.aggregate_remove_host") - - def test__uptime_hypervisor(self): - nova_scenario = utils.NovaScenario() - nova_scenario._uptime_hypervisor("fake_hostname") - - self.admin_clients("nova").hypervisors.uptime.assert_called_once_with( - "fake_hostname") - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.uptime_hypervisor") - - def test__attach_interface(self): - fake_server = mock.Mock() - nova_scenario = utils.NovaScenario() - - result = nova_scenario._attach_interface(fake_server, net_id="id") - self.assertEqual( - self.clients("nova").servers.interface_attach.return_value, - result) - self.clients("nova").servers.interface_attach.assert_called_once_with( - fake_server, None, "id", None) - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.attach_interface") - - def test_aggregate_set_metadata(self): - nova_scenario = utils.NovaScenario(context=self.context) - fake_metadata = {"test_metadata": "true"} - result = nova_scenario._aggregate_set_metadata("fake_aggregate", - fake_metadata) - self.assertEqual( - self.admin_clients("nova").aggregates.set_metadata.return_value, - result) - self.admin_clients( - "nova").aggregates.set_metadata.assert_called_once_with( - "fake_aggregate", fake_metadata) - self._test_atomic_action_timer(nova_scenario.atomic_actions(), - "nova.aggregate_set_metadata") diff --git a/tests/unit/plugins/openstack/scenarios/quotas/__init__.py b/tests/unit/plugins/openstack/scenarios/quotas/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/scenarios/quotas/test_quotas.py b/tests/unit/plugins/openstack/scenarios/quotas/test_quotas.py deleted file mode 100644 index b9c023e4f4..0000000000 --- a/tests/unit/plugins/openstack/scenarios/quotas/test_quotas.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright 2014: Kylin Cloud -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from rally.plugins.openstack.scenarios.quotas import quotas -from tests.unit import test - - -class QuotasTestCase(test.ScenarioTestCase): - - def setUp(self): - super(QuotasTestCase, self).setUp() - self.context.update({ - "user": { - "tenant_id": "fake", - "credential": mock.MagicMock() - }, - "tenant": {"id": "fake"} - }) - - def test_nova_get(self): - scenario = quotas.NovaGet(self.context) - scenario._get_quotas = mock.MagicMock() - scenario.run() - scenario._get_quotas.assert_called_once_with("nova", "fake") - - def test_cinder_get(self): - scenario = quotas.CinderGet(self.context) - scenario._get_quotas = mock.MagicMock() - scenario.run() - scenario._get_quotas.assert_called_once_with("cinder", "fake") - - def test_nova_update(self): - scenario = quotas.NovaUpdate(self.context) - scenario._update_quotas = mock.MagicMock() - scenario.run(max_quota=1024) - scenario._update_quotas.assert_called_once_with("nova", "fake", 1024) - - def test_nova_update_and_delete(self): - scenario = quotas.NovaUpdateAndDelete(self.context) - scenario._update_quotas = mock.MagicMock() - scenario._delete_quotas = mock.MagicMock() - scenario.run(max_quota=1024) - scenario._update_quotas.assert_called_once_with("nova", "fake", 1024) - scenario._delete_quotas.assert_called_once_with("nova", "fake") - - def test_cinder_update(self): - scenario = quotas.CinderUpdate(self.context) - scenario._update_quotas = mock.MagicMock() - scenario.run(max_quota=1024) - scenario._update_quotas.assert_called_once_with("cinder", "fake", 1024) - - def test_cinder_update_and_delete(self): - scenario = quotas.CinderUpdateAndDelete(self.context) - scenario._update_quotas = mock.MagicMock() - scenario._delete_quotas = mock.MagicMock() - scenario.run(max_quota=1024) - scenario._update_quotas.assert_called_once_with("cinder", "fake", 1024) - scenario._delete_quotas.assert_called_once_with("cinder", "fake") - - def test_neutron_update(self): - scenario = quotas.NeutronUpdate(self.context) - scenario._update_quotas = mock.MagicMock() - mock_quota_update_fn = self.admin_clients("neutron").update_quota - scenario.run(max_quota=1024) - scenario._update_quotas.assert_called_once_with("neutron", "fake", - 1024, - mock_quota_update_fn) diff --git a/tests/unit/plugins/openstack/scenarios/quotas/test_utils.py b/tests/unit/plugins/openstack/scenarios/quotas/test_utils.py deleted file mode 100644 index 090e5ffe61..0000000000 --- a/tests/unit/plugins/openstack/scenarios/quotas/test_utils.py +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright 2014: Kylin Cloud -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
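Nearly every test in this section finishes with a call like _test_atomic_action_timer(scenario.atomic_actions(), name), i.e. it checks that the helper under test recorded an atomic action with the expected name (and, in a few floating-ip tests, with count=2). A minimal sketch of that kind of check, assuming atomic_actions() yields dicts carrying a "name" key; this is a hypothetical helper, not the project's actual implementation:

    def assert_atomic_action_recorded(atomic_actions, name, count=1):
        # Count the recorded actions whose name matches, then compare against
        # the expected number of occurrences.
        matches = [a for a in atomic_actions if a.get("name") == name]
        assert len(matches) == count, (
            "expected %d atomic action(s) named %r, found %d"
            % (count, name, len(matches)))

    # Usage, mirroring the assertions above:
    # assert_atomic_action_recorded(scenario.atomic_actions(), "nova.boot_servers")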
- -import mock - -from rally.plugins.openstack.scenarios.quotas import utils -from tests.unit import test - - -class QuotasScenarioTestCase(test.ScenarioTestCase): - - def test__update_quotas(self): - tenant_id = "fake_tenant" - quotas = { - "metadata_items": 10, - "key_pairs": 10, - "injected_file_content_bytes": 1024, - "injected_file_path_bytes": 1024, - "ram": 5120, - "instances": 10, - "injected_files": 10, - "cores": 10, - } - self.admin_clients("nova").quotas.update.return_value = quotas - scenario = utils.QuotasScenario(self.context) - scenario._generate_quota_values = mock.MagicMock(return_value=quotas) - - result = scenario._update_quotas("nova", tenant_id) - - self.assertEqual(quotas, result) - self.admin_clients("nova").quotas.update.assert_called_once_with( - tenant_id, **quotas) - self._test_atomic_action_timer(scenario.atomic_actions(), - "quotas.update_quotas") - - def test__update_quotas_fn(self): - tenant_id = "fake_tenant" - quotas = { - "metadata_items": 10, - "key_pairs": 10, - "injected_file_content_bytes": 1024, - "injected_file_path_bytes": 1024, - "ram": 5120, - "instances": 10, - "injected_files": 10, - "cores": 10, - } - self.admin_clients("nova").quotas.update.return_value = quotas - scenario = utils.QuotasScenario(self.context) - scenario._generate_quota_values = mock.MagicMock(return_value=quotas) - - mock_quota = mock.Mock(return_value=quotas) - - result = scenario._update_quotas("nova", tenant_id, - quota_update_fn=mock_quota) - - self.assertEqual(quotas, result) - self._test_atomic_action_timer(scenario.atomic_actions(), - "quotas.update_quotas") - - def test__generate_quota_values_nova(self): - max_quota = 1024 - scenario = utils.QuotasScenario(self.context) - quotas = scenario._generate_quota_values(max_quota, "nova") - for k, v in quotas.items(): - self.assertGreaterEqual(v, -1) - self.assertLessEqual(v, max_quota) - - def test__generate_quota_values_cinder(self): - max_quota = 1024 - scenario = utils.QuotasScenario(self.context) - quotas = scenario._generate_quota_values(max_quota, "cinder") - for k, v in quotas.items(): - self.assertGreaterEqual(v, -1) - self.assertLessEqual(v, max_quota) - - def test__generate_quota_values_neutron(self): - max_quota = 1024 - scenario = utils.QuotasScenario(self.context) - quotas = scenario._generate_quota_values(max_quota, "neutron") - for v in quotas.values(): - for v1 in v.values(): - for v2 in v1.values(): - self.assertGreaterEqual(v2, -1) - self.assertLessEqual(v2, max_quota) - - def test__delete_quotas(self): - tenant_id = "fake_tenant" - scenario = utils.QuotasScenario(self.context) - scenario._delete_quotas("nova", tenant_id) - - self.admin_clients("nova").quotas.delete.assert_called_once_with( - tenant_id) - self._test_atomic_action_timer(scenario.atomic_actions(), - "quotas.delete_quotas") - - def test__get_quotas(self): - tenant_id = "fake_tenant" - scenario = utils.QuotasScenario(self.context) - scenario._get_quotas("nova", tenant_id) - - self.admin_clients("nova").quotas.get.assert_called_once_with( - tenant_id) - self._test_atomic_action_timer(scenario.atomic_actions(), - "quotas.get_quotas") diff --git a/tests/unit/plugins/openstack/scenarios/sahara/__init__.py b/tests/unit/plugins/openstack/scenarios/sahara/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/scenarios/sahara/test_clusters.py b/tests/unit/plugins/openstack/scenarios/sahara/test_clusters.py deleted file mode 100644 index d4999f04db..0000000000 --- 
a/tests/unit/plugins/openstack/scenarios/sahara/test_clusters.py +++ /dev/null @@ -1,170 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.plugins.openstack.scenarios.sahara import clusters -from tests.unit import test - -BASE = "rally.plugins.openstack.scenarios.sahara.clusters" - - -class SaharaClustersTestCase(test.ScenarioTestCase): - - @mock.patch("%s.CreateAndDeleteCluster._delete_cluster" % BASE) - @mock.patch("%s.CreateAndDeleteCluster._launch_cluster" % BASE, - return_value=mock.MagicMock(id=42)) - def test_create_and_delete_cluster(self, - mock_launch_cluster, - mock_delete_cluster): - scenario = clusters.CreateAndDeleteCluster(self.context) - - scenario.context = { - "tenant": { - "sahara": { - "image": "test_image", - } - } - } - - scenario.run(master_flavor="test_flavor_m", - worker_flavor="test_flavor_w", - workers_count=5, - plugin_name="test_plugin", - hadoop_version="test_version") - - mock_launch_cluster.assert_called_once_with( - flavor_id=None, - master_flavor_id="test_flavor_m", - worker_flavor_id="test_flavor_w", - image_id="test_image", - workers_count=5, - plugin_name="test_plugin", - hadoop_version="test_version", - floating_ip_pool=None, - volumes_per_node=None, - volumes_size=None, - auto_security_group=None, - security_groups=None, - node_configs=None, - cluster_configs=None, - enable_anti_affinity=False, - enable_proxy=False, - use_autoconfig=True) - - mock_delete_cluster.assert_called_once_with( - mock_launch_cluster.return_value) - - @mock.patch("%s.CreateAndDeleteCluster._delete_cluster" % BASE) - @mock.patch("%s.CreateAndDeleteCluster._launch_cluster" % BASE, - return_value=mock.MagicMock(id=42)) - def test_create_and_delete_cluster_deprecated_flavor(self, - mock_launch_cluster, - mock_delete_cluster): - scenario = clusters.CreateAndDeleteCluster(self.context) - - scenario.context = { - "tenant": { - "sahara": { - "image": "test_image", - } - } - } - scenario.run(flavor="test_deprecated_arg", - master_flavor=None, - worker_flavor=None, - workers_count=5, - plugin_name="test_plugin", - hadoop_version="test_version") - - mock_launch_cluster.assert_called_once_with( - flavor_id="test_deprecated_arg", - master_flavor_id=None, - worker_flavor_id=None, - image_id="test_image", - workers_count=5, - plugin_name="test_plugin", - hadoop_version="test_version", - floating_ip_pool=None, - volumes_per_node=None, - volumes_size=None, - auto_security_group=None, - security_groups=None, - node_configs=None, - cluster_configs=None, - enable_anti_affinity=False, - enable_proxy=False, - use_autoconfig=True) - - mock_delete_cluster.assert_called_once_with( - mock_launch_cluster.return_value) - - @mock.patch("%s.CreateScaleDeleteCluster._delete_cluster" % BASE) - @mock.patch("%s.CreateScaleDeleteCluster._scale_cluster" % BASE) - @mock.patch("%s.CreateScaleDeleteCluster._launch_cluster" % BASE, - return_value=mock.MagicMock(id=42)) - def test_create_scale_delete_cluster(self, - 
mock_launch_cluster, - mock_scale_cluster, - mock_delete_cluster): - self.clients("sahara").clusters.get.return_value = mock.MagicMock( - id=42, status="active" - ) - scenario = clusters.CreateScaleDeleteCluster(self.context) - - scenario.context = { - "tenant": { - "sahara": { - "image": "test_image", - } - } - } - scenario.run(master_flavor="test_flavor_m", - worker_flavor="test_flavor_w", - workers_count=5, - deltas=[1, -1], - plugin_name="test_plugin", - hadoop_version="test_version") - - mock_launch_cluster.assert_called_once_with( - flavor_id=None, - master_flavor_id="test_flavor_m", - worker_flavor_id="test_flavor_w", - image_id="test_image", - workers_count=5, - plugin_name="test_plugin", - hadoop_version="test_version", - floating_ip_pool=None, - volumes_per_node=None, - volumes_size=None, - auto_security_group=None, - security_groups=None, - node_configs=None, - cluster_configs=None, - enable_anti_affinity=False, - enable_proxy=False, - use_autoconfig=True) - - mock_scale_cluster.assert_has_calls([ - mock.call( - self.clients("sahara").clusters.get.return_value, - 1), - mock.call( - self.clients("sahara").clusters.get.return_value, - -1), - ]) - - mock_delete_cluster.assert_called_once_with( - self.clients("sahara").clusters.get.return_value) diff --git a/tests/unit/plugins/openstack/scenarios/sahara/test_jobs.py b/tests/unit/plugins/openstack/scenarios/sahara/test_jobs.py deleted file mode 100644 index ae0552c78c..0000000000 --- a/tests/unit/plugins/openstack/scenarios/sahara/test_jobs.py +++ /dev/null @@ -1,230 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
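The CreateScaleDeleteCluster test above stubs clusters.get to return an "active" cluster, then expects _scale_cluster to be called once per delta with that refreshed object and _delete_cluster to be called with it as well. A hypothetical, simplified sketch of the control flow those assertions imply (names and ordering assumed, not the plugin's actual code):

    def create_scale_delete(launch, get_cluster, scale, delete, deltas, **kwargs):
        launched = launch(**kwargs)          # _launch_cluster(...)
        cluster = get_cluster(launched.id)   # refreshed via clusters.get
        for delta in deltas:                 # deltas=[1, -1] in the test
            scale(cluster, delta)            # _scale_cluster(cluster, delta)
        delete(cluster)                      # _delete_cluster(cluster)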
- -import mock - -from rally.common import cfg -from rally.plugins.openstack.scenarios.sahara import jobs -from tests.unit import test - -CONF = cfg.CONF - -BASE = "rally.plugins.openstack.scenarios.sahara.jobs" - - -class SaharaJobTestCase(test.ScenarioTestCase): - - def setUp(self): - super(SaharaJobTestCase, self).setUp() - - self.context = test.get_test_context() - CONF.set_override("sahara_cluster_check_interval", 0, "openstack") - CONF.set_override("sahara_job_check_interval", 0, "openstack") - - @mock.patch("%s.CreateLaunchJob._run_job_execution" % BASE) - def test_create_launch_job_java(self, mock_run_job): - self.clients("sahara").jobs.create.return_value = mock.MagicMock( - id="42") - - self.context.update({ - "tenant": { - "sahara": { - "image": "test_image", - "mains": ["main_42"], - "libs": ["lib_42"], - "cluster": "cl_42", - "input": "in_42" - } - } - }) - scenario = jobs.CreateLaunchJob(self.context) - scenario.generate_random_name = mock.Mock( - return_value="job_42") - - scenario.run(job_type="java", - configs={"conf_key": "conf_val"}, - job_idx=0) - self.clients("sahara").jobs.create.assert_called_once_with( - name="job_42", - type="java", - description="", - mains=["main_42"], - libs=["lib_42"] - ) - - mock_run_job.assert_called_once_with( - job_id="42", - cluster_id="cl_42", - input_id=None, - output_id=None, - configs={"conf_key": "conf_val"}, - job_idx=0 - ) - - @mock.patch("%s.CreateLaunchJob._run_job_execution" % BASE) - @mock.patch("%s.CreateLaunchJob._create_output_ds" % BASE, - return_value=mock.MagicMock(id="out_42")) - def test_create_launch_job_pig(self, - mock_create_output, - mock_run_job): - self.clients("sahara").jobs.create.return_value = mock.MagicMock( - id="42") - - self.context.update({ - "tenant": { - "sahara": { - "image": "test_image", - "mains": ["main_42"], - "libs": ["lib_42"], - "cluster": "cl_42", - "input": "in_42" - } - } - }) - scenario = jobs.CreateLaunchJob(self.context) - scenario.generate_random_name = mock.Mock(return_value="job_42") - - scenario.run(job_type="pig", - configs={"conf_key": "conf_val"}, - job_idx=0) - self.clients("sahara").jobs.create.assert_called_once_with( - name="job_42", - type="pig", - description="", - mains=["main_42"], - libs=["lib_42"] - ) - - mock_run_job.assert_called_once_with( - job_id="42", - cluster_id="cl_42", - input_id="in_42", - output_id="out_42", - configs={"conf_key": "conf_val"}, - job_idx=0 - ) - - @mock.patch("%s.CreateLaunchJob._run_job_execution" % BASE) - @mock.patch("%s.CreateLaunchJob.generate_random_name" % BASE, - return_value="job_42") - def test_create_launch_job_sequence(self, - mock__random_name, - mock_run_job): - self.clients("sahara").jobs.create.return_value = mock.MagicMock( - id="42") - - self.context.update({ - "tenant": { - "sahara": { - "image": "test_image", - "mains": ["main_42"], - "libs": ["lib_42"], - "cluster": "cl_42", - "input": "in_42" - } - } - }) - scenario = jobs.CreateLaunchJobSequence(self.context) - - scenario.run( - jobs=[ - { - "job_type": "java", - "configs": {"conf_key": "conf_val"} - }, { - "job_type": "java", - "configs": {"conf_key2": "conf_val2"} - }]) - - jobs_create_call = mock.call(name="job_42", - type="java", - description="", - mains=["main_42"], - libs=["lib_42"]) - - self.clients("sahara").jobs.create.assert_has_calls( - [jobs_create_call, jobs_create_call]) - - mock_run_job.assert_has_calls([ - mock.call(job_id="42", - cluster_id="cl_42", - input_id=None, - output_id=None, - configs={"conf_key": "conf_val"}, - job_idx=0), - 
mock.call(job_id="42", - cluster_id="cl_42", - input_id=None, - output_id=None, - configs={"conf_key2": "conf_val2"}, - job_idx=1) - ]) - - @mock.patch("%s.CreateLaunchJob.generate_random_name" % BASE, - return_value="job_42") - @mock.patch("%s.CreateLaunchJobSequenceWithScaling" - "._scale_cluster" % BASE) - @mock.patch("%s.CreateLaunchJob._run_job_execution" % BASE) - def test_create_launch_job_sequence_with_scaling( - self, - mock_run_job, - mock_create_launch_job_sequence_with_scaling__scale_cluster, - mock_create_launch_job_generate_random_name - ): - self.clients("sahara").jobs.create.return_value = mock.MagicMock( - id="42") - self.clients("sahara").clusters.get.return_value = mock.MagicMock( - id="cl_42", status="active") - - self.context.update({ - "tenant": { - "sahara": { - "image": "test_image", - "mains": ["main_42"], - "libs": ["lib_42"], - "cluster": "cl_42", - "input": "in_42" - } - } - }) - scenario = jobs.CreateLaunchJobSequenceWithScaling(self.context) - - scenario.run( - jobs=[ - { - "job_type": "java", - "configs": {"conf_key": "conf_val"} - }, { - "job_type": "java", - "configs": {"conf_key2": "conf_val2"} - }], - deltas=[1, -1]) - - jobs_create_call = mock.call(name="job_42", - type="java", - description="", - mains=["main_42"], - libs=["lib_42"]) - - self.clients("sahara").jobs.create.assert_has_calls( - [jobs_create_call, jobs_create_call]) - - je_0 = mock.call(job_id="42", cluster_id="cl_42", input_id=None, - output_id=None, configs={"conf_key": "conf_val"}, - job_idx=0) - je_1 = mock.call(job_id="42", cluster_id="cl_42", input_id=None, - output_id=None, - configs={"conf_key2": "conf_val2"}, job_idx=1) - mock_run_job.assert_has_calls([je_0, je_1, je_0, je_1, je_0, je_1]) diff --git a/tests/unit/plugins/openstack/scenarios/sahara/test_node_group_templates.py b/tests/unit/plugins/openstack/scenarios/sahara/test_node_group_templates.py deleted file mode 100644 index c5ae0f22bd..0000000000 --- a/tests/unit/plugins/openstack/scenarios/sahara/test_node_group_templates.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
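The interleaved je_0/je_1 expectations above (six calls for two jobs with deltas=[1, -1]) imply that the scaling scenario runs the full job sequence once after launch and once more after every scaling step, i.e. 1 + len(deltas) passes. A hypothetical, simplified sketch of that loop structure, with the helper names taken from the mocks in the test:

    def run_jobs_with_scaling(jobs, deltas, run_job, scale_cluster, cluster):
        # One pass over the job sequence on the freshly launched cluster...
        for idx, job in enumerate(jobs):
            run_job(job, job_idx=idx)
        # ...then another full pass after each resize.
        for delta in deltas:
            scale_cluster(cluster, delta)
            for idx, job in enumerate(jobs):
                run_job(job, job_idx=idx)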
- -import mock - -from rally.plugins.openstack.scenarios.sahara import (node_group_templates - as ngts) -from tests.unit import test - -BASE = "rally.plugins.openstack.scenarios.sahara.node_group_templates" - - -class SaharaNodeGroupTemplatesTestCase(test.TestCase): - - def setUp(self): - super(SaharaNodeGroupTemplatesTestCase, self).setUp() - self.context = test.get_test_context() - - @mock.patch("%s.CreateAndListNodeGroupTemplates" - "._list_node_group_templates" % BASE) - @mock.patch("%s.CreateAndListNodeGroupTemplates" - "._create_master_node_group_template" % BASE) - @mock.patch("%s.CreateAndListNodeGroupTemplates" - "._create_worker_node_group_template" % BASE) - def test_create_and_list_node_group_templates(self, - mock_create_worker, - mock_create_master, - mock_list_group): - ngts.CreateAndListNodeGroupTemplates(self.context).run( - "test_flavor", "test_plugin", "test_version") - - mock_create_master.assert_called_once_with( - flavor_id="test_flavor", - plugin_name="test_plugin", - hadoop_version="test_version", - use_autoconfig=True) - mock_create_worker.assert_called_once_with( - flavor_id="test_flavor", - plugin_name="test_plugin", - hadoop_version="test_version", - use_autoconfig=True) - mock_list_group.assert_called_once_with() - - @mock.patch("%s.CreateDeleteNodeGroupTemplates" - "._delete_node_group_template" % BASE) - @mock.patch("%s.CreateDeleteNodeGroupTemplates" - "._create_master_node_group_template" % BASE) - @mock.patch("%s.CreateDeleteNodeGroupTemplates" - "._create_worker_node_group_template" % BASE) - def test_create_delete_node_group_templates(self, - mock_create_worker, - mock_create_master, - mock_delete_group): - ngts.CreateDeleteNodeGroupTemplates(self.context).run( - "test_flavor", "test_plugin", "test_version") - - mock_create_master.assert_called_once_with( - flavor_id="test_flavor", - plugin_name="test_plugin", - hadoop_version="test_version", - use_autoconfig=True) - mock_create_worker.assert_called_once_with( - flavor_id="test_flavor", - plugin_name="test_plugin", - hadoop_version="test_version", - use_autoconfig=True) - - mock_delete_group.assert_has_calls(calls=[ - mock.call(mock_create_master.return_value), - mock.call(mock_create_worker.return_value)]) diff --git a/tests/unit/plugins/openstack/scenarios/sahara/test_utils.py b/tests/unit/plugins/openstack/scenarios/sahara/test_utils.py deleted file mode 100644 index 8476f393e3..0000000000 --- a/tests/unit/plugins/openstack/scenarios/sahara/test_utils.py +++ /dev/null @@ -1,540 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import uuid - -import mock -from saharaclient.api import base as sahara_base - -from rally.common import cfg -from rally import consts -from rally import exceptions -from rally.plugins.openstack.scenarios.sahara import utils -from tests.unit import test - -CONF = cfg.CONF - -SAHARA_UTILS = "rally.plugins.openstack.scenarios.sahara.utils" - - -class SaharaScenarioTestCase(test.ScenarioTestCase): - # NOTE(stpierre): the Sahara utils generally do funny stuff with - # wait_for() calls -- frequently the is_ready and - # update_resource arguments are functions defined in the Sahara - # utils themselves instead of the more standard resource_is() and - # get_from_manager() calls. As a result, the tests below do more - # integrated/functional testing of wait_for() calls, and we can't - # just mock out wait_for and friends the way we usually do. - patch_task_utils = False - - def setUp(self): - super(SaharaScenarioTestCase, self).setUp() - - CONF.set_override("sahara_cluster_check_interval", 0, "openstack") - CONF.set_override("sahara_job_check_interval", 0, "openstack") - - def test_list_node_group_templates(self): - ngts = [] - self.clients("sahara").node_group_templates.list.return_value = ngts - - scenario = utils.SaharaScenario(self.context) - return_ngts_list = scenario._list_node_group_templates() - - self.assertEqual(ngts, return_ngts_list) - self._test_atomic_action_timer(scenario.atomic_actions(), - "sahara.list_node_group_templates") - - @mock.patch(SAHARA_UTILS + ".SaharaScenario.generate_random_name", - return_value="random_name") - @mock.patch(SAHARA_UTILS + ".sahara_consts") - def test_create_node_group_templates( - self, mock_sahara_consts, - mock_generate_random_name): - - scenario = utils.SaharaScenario(self.context) - mock_processes = { - "test_plugin": { - "test_version": { - "master": ["p1"], - "worker": ["p2"] - } - } - } - - mock_sahara_consts.NODE_PROCESSES = mock_processes - - scenario._create_master_node_group_template( - flavor_id="test_flavor", - plugin_name="test_plugin", - hadoop_version="test_version", - use_autoconfig=True - ) - scenario._create_worker_node_group_template( - flavor_id="test_flavor", - plugin_name="test_plugin", - hadoop_version="test_version", - use_autoconfig=True - ) - - create_calls = [ - mock.call( - name="random_name", - plugin_name="test_plugin", - hadoop_version="test_version", - flavor_id="test_flavor", - node_processes=["p1"], - use_autoconfig=True), - mock.call( - name="random_name", - plugin_name="test_plugin", - hadoop_version="test_version", - flavor_id="test_flavor", - node_processes=["p2"], - use_autoconfig=True - )] - self.clients("sahara").node_group_templates.create.assert_has_calls( - create_calls) - - self._test_atomic_action_timer( - scenario.atomic_actions(), - "sahara.create_master_node_group_template") - self._test_atomic_action_timer( - scenario.atomic_actions(), - "sahara.create_worker_node_group_template") - - def test_delete_node_group_templates(self): - scenario = utils.SaharaScenario(self.context) - ng = mock.MagicMock(id=42) - - scenario._delete_node_group_template(ng) - - delete_mock = self.clients("sahara").node_group_templates.delete - delete_mock.assert_called_once_with(42) - - self._test_atomic_action_timer(scenario.atomic_actions(), - "sahara.delete_node_group_template") - - @mock.patch(SAHARA_UTILS + ".SaharaScenario.generate_random_name", - return_value="random_name") - @mock.patch(SAHARA_UTILS + ".sahara_consts") - def test_launch_cluster(self, mock_sahara_consts, - mock_generate_random_name): - - 
self.context.update({ - "tenant": { - "networks": [ - { - "id": "test_neutron_id", - "router_id": "test_router_id" - } - ] - } - }) - - self.clients("services").values.return_value = [ - consts.Service.NEUTRON - ] - - scenario = utils.SaharaScenario(context=self.context) - - mock_processes = { - "test_plugin": { - "test_version": { - "master": ["p1"], - "worker": ["p2"] - } - } - } - - mock_configs = { - "test_plugin": { - "test_version": { - "target": "HDFS", - "config_name": "dfs.replication" - } - } - } - - floating_ip_pool_uuid = str(uuid.uuid4()) - node_groups = [ - { - "name": "master-ng", - "flavor_id": "test_flavor_m", - "node_processes": ["p1"], - "floating_ip_pool": floating_ip_pool_uuid, - "count": 1, - "auto_security_group": True, - "security_groups": ["g1", "g2"], - "node_configs": {"HDFS": {"local_config": "local_value"}}, - "use_autoconfig": True, - }, { - "name": "worker-ng", - "flavor_id": "test_flavor_w", - "node_processes": ["p2"], - "floating_ip_pool": floating_ip_pool_uuid, - "volumes_per_node": 5, - "volumes_size": 10, - "count": 42, - "auto_security_group": True, - "security_groups": ["g1", "g2"], - "node_configs": {"HDFS": {"local_config": "local_value"}}, - "use_autoconfig": True, - } - ] - - mock_sahara_consts.NODE_PROCESSES = mock_processes - mock_sahara_consts.REPLICATION_CONFIGS = mock_configs - - self.clients("sahara").clusters.create.return_value.id = ( - "test_cluster_id") - - self.clients("sahara").clusters.get.return_value.status = ( - "active") - - scenario._launch_cluster( - plugin_name="test_plugin", - hadoop_version="test_version", - master_flavor_id="test_flavor_m", - worker_flavor_id="test_flavor_w", - image_id="test_image", - floating_ip_pool=floating_ip_pool_uuid, - volumes_per_node=5, - volumes_size=10, - auto_security_group=True, - security_groups=["g1", "g2"], - workers_count=42, - node_configs={"HDFS": {"local_config": "local_value"}}, - use_autoconfig=True - ) - - self.clients("sahara").clusters.create.assert_called_once_with( - name="random_name", - plugin_name="test_plugin", - hadoop_version="test_version", - node_groups=node_groups, - default_image_id="test_image", - cluster_configs={"HDFS": {"dfs.replication": 3}}, - net_id="test_neutron_id", - anti_affinity=None, - use_autoconfig=True - ) - - self._test_atomic_action_timer(scenario.atomic_actions(), - "sahara.launch_cluster") - - @mock.patch(SAHARA_UTILS + ".SaharaScenario.generate_random_name", - return_value="random_name") - @mock.patch(SAHARA_UTILS + ".sahara_consts") - def test_launch_cluster_with_proxy(self, mock_sahara_consts, - mock_generate_random_name): - - context = { - "tenant": { - "networks": [ - { - "id": "test_neutron_id", - "router_id": "test_router_id" - } - ] - } - } - - self.clients("services").values.return_value = [ - consts.Service.NEUTRON - ] - - scenario = utils.SaharaScenario(context=context) - - mock_processes = { - "test_plugin": { - "test_version": { - "master": ["p1"], - "worker": ["p2"] - } - } - } - - mock_configs = { - "test_plugin": { - "test_version": { - "target": "HDFS", - "config_name": "dfs.replication" - } - } - } - - floating_ip_pool_uuid = str(uuid.uuid4()) - node_groups = [ - { - "name": "master-ng", - "flavor_id": "test_flavor_m", - "node_processes": ["p1"], - "floating_ip_pool": floating_ip_pool_uuid, - "count": 1, - "auto_security_group": True, - "security_groups": ["g1", "g2"], - "node_configs": {"HDFS": {"local_config": "local_value"}}, - "is_proxy_gateway": True, - "use_autoconfig": True, - }, { - "name": "worker-ng", - "flavor_id": 
"test_flavor_w", - "node_processes": ["p2"], - "volumes_per_node": 5, - "volumes_size": 10, - "count": 40, - "auto_security_group": True, - "security_groups": ["g1", "g2"], - "node_configs": {"HDFS": {"local_config": "local_value"}}, - "use_autoconfig": True, - }, { - "name": "proxy-ng", - "flavor_id": "test_flavor_w", - "node_processes": ["p2"], - "floating_ip_pool": floating_ip_pool_uuid, - "volumes_per_node": 5, - "volumes_size": 10, - "count": 2, - "auto_security_group": True, - "security_groups": ["g1", "g2"], - "node_configs": {"HDFS": {"local_config": "local_value"}}, - "is_proxy_gateway": True, - "use_autoconfig": True, - } - ] - - mock_sahara_consts.NODE_PROCESSES = mock_processes - mock_sahara_consts.REPLICATION_CONFIGS = mock_configs - - self.clients("sahara").clusters.create.return_value = mock.MagicMock( - id="test_cluster_id") - - self.clients("sahara").clusters.get.return_value = mock.MagicMock( - status="active") - - scenario._launch_cluster( - plugin_name="test_plugin", - hadoop_version="test_version", - master_flavor_id="test_flavor_m", - worker_flavor_id="test_flavor_w", - image_id="test_image", - floating_ip_pool=floating_ip_pool_uuid, - volumes_per_node=5, - volumes_size=10, - auto_security_group=True, - security_groups=["g1", "g2"], - workers_count=42, - node_configs={"HDFS": {"local_config": "local_value"}}, - enable_proxy=True, - use_autoconfig=True - ) - - self.clients("sahara").clusters.create.assert_called_once_with( - name="random_name", - plugin_name="test_plugin", - hadoop_version="test_version", - node_groups=node_groups, - default_image_id="test_image", - cluster_configs={"HDFS": {"dfs.replication": 3}}, - net_id="test_neutron_id", - anti_affinity=None, - use_autoconfig=True - ) - - self._test_atomic_action_timer(scenario.atomic_actions(), - "sahara.launch_cluster") - - @mock.patch(SAHARA_UTILS + ".SaharaScenario.generate_random_name", - return_value="random_name") - @mock.patch(SAHARA_UTILS + ".sahara_consts") - def test_launch_cluster_error(self, mock_sahara_consts, - mock_generate_random_name): - - scenario = utils.SaharaScenario(self.context) - mock_processes = { - "test_plugin": { - "test_version": { - "master": ["p1"], - "worker": ["p2"] - } - } - } - - mock_configs = { - "test_plugin": { - "test_version": { - "target": "HDFS", - "config_name": "dfs.replication" - } - } - } - - mock_sahara_consts.NODE_PROCESSES = mock_processes - mock_sahara_consts.REPLICATION_CONFIGS = mock_configs - - self.clients("sahara").clusters.create.return_value = mock.MagicMock( - id="test_cluster_id") - - self.clients("sahara").clusters.get.return_value = mock.MagicMock( - status="error") - - self.assertRaises(exceptions.GetResourceErrorStatus, - scenario._launch_cluster, - plugin_name="test_plugin", - hadoop_version="test_version", - master_flavor_id="test_flavor_m", - worker_flavor_id="test_flavor_w", - image_id="test_image", - floating_ip_pool="test_pool", - volumes_per_node=5, - volumes_size=10, - workers_count=42, - node_configs={"HDFS": {"local_config": - "local_value"}}) - - def test_scale_cluster(self): - scenario = utils.SaharaScenario(self.context) - cluster = mock.MagicMock(id=42, node_groups=[{ - "name": "random_master", - "count": 1 - }, { - "name": "random_worker", - "count": 41 - }]) - self.clients("sahara").clusters.get.return_value = mock.MagicMock( - id=42, - status="active") - - expected_scale_object = { - "resize_node_groups": [{ - "name": "random_worker", - "count": 42 - }] - } - - scenario._scale_cluster(cluster, 1) - 
self.clients("sahara").clusters.scale.assert_called_once_with( - 42, expected_scale_object) - - def test_delete_cluster(self): - scenario = utils.SaharaScenario(self.context) - cluster = mock.MagicMock(id=42) - self.clients("sahara").clusters.get.side_effect = [ - cluster, sahara_base.APIException() - ] - - scenario._delete_cluster(cluster) - delete_mock = self.clients("sahara").clusters.delete - delete_mock.assert_called_once_with(42) - - cl_get_expected = mock.call(42) - self.clients("sahara").clusters.get.assert_has_calls([cl_get_expected, - cl_get_expected]) - - self._test_atomic_action_timer(scenario.atomic_actions(), - "sahara.delete_cluster") - - @mock.patch(SAHARA_UTILS + ".SaharaScenario.generate_random_name", - return_value="42") - def test_create_output_ds(self, mock_generate_random_name): - self.context.update({ - "sahara": { - "output_conf": { - "output_type": "hdfs", - "output_url_prefix": "hdfs://test_out/" - } - } - }) - - scenario = utils.SaharaScenario(self.context) - scenario._create_output_ds() - - self.clients("sahara").data_sources.create.assert_called_once_with( - name="42", - description="", - data_source_type="hdfs", - url="hdfs://test_out/42" - ) - - @mock.patch(SAHARA_UTILS + ".SaharaScenario.generate_random_name", - return_value="42") - def test_create_output_ds_swift(self, mock_generate_random_name): - self.context.update({ - "sahara": { - "output_conf": { - "output_type": "swift", - "output_url_prefix": "swift://test_out/" - } - } - }) - - scenario = utils.SaharaScenario(self.context) - self.assertRaises(exceptions.RallyException, - scenario._create_output_ds) - - def test_run_job_execution(self): - self.clients("sahara").job_executions.get.side_effect = [ - mock.MagicMock(info={"status": "pending"}, id="42"), - mock.MagicMock(info={"status": "SUCCESS"}, id="42")] - - self.clients("sahara").job_executions.create.return_value = ( - mock.MagicMock(id="42")) - - scenario = utils.SaharaScenario(self.context) - scenario._run_job_execution(job_id="test_job_id", - cluster_id="test_cluster_id", - input_id="test_input_id", - output_id="test_output_id", - configs={"k": "v"}, - job_idx=0) - - self.clients("sahara").job_executions.create.assert_called_once_with( - job_id="test_job_id", - cluster_id="test_cluster_id", - input_id="test_input_id", - output_id="test_output_id", - configs={"k": "v"} - ) - - je_get_expected = mock.call("42") - self.clients("sahara").job_executions.get.assert_has_calls( - [je_get_expected, je_get_expected] - ) - - def test_run_job_execution_fail(self): - self.clients("sahara").job_executions.get.side_effect = [ - mock.MagicMock(info={"status": "pending"}, id="42"), - mock.MagicMock(info={"status": "killed"}, id="42")] - - self.clients("sahara").job_executions.create.return_value = ( - mock.MagicMock(id="42")) - - scenario = utils.SaharaScenario(self.context) - self.assertRaises(exceptions.RallyException, - scenario._run_job_execution, - job_id="test_job_id", - cluster_id="test_cluster_id", - input_id="test_input_id", - output_id="test_output_id", - configs={"k": "v"}, - job_idx=0) - - self.clients("sahara").job_executions.create.assert_called_once_with( - job_id="test_job_id", - cluster_id="test_cluster_id", - input_id="test_input_id", - output_id="test_output_id", - configs={"k": "v"} - ) diff --git a/tests/unit/plugins/openstack/scenarios/senlin/__init__.py b/tests/unit/plugins/openstack/scenarios/senlin/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/scenarios/senlin/test_clusters.py 
b/tests/unit/plugins/openstack/scenarios/senlin/test_clusters.py deleted file mode 100644 index 92b26fcbc9..0000000000 --- a/tests/unit/plugins/openstack/scenarios/senlin/test_clusters.py +++ /dev/null @@ -1,34 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.plugins.openstack.scenarios.senlin import clusters -from tests.unit import test - - -class SenlinClustersTestCase(test.ScenarioTestCase): - - def test_create_and_delete_cluster(self): - mock_cluster = mock.Mock() - self.context["tenant"] = {"profile": "fake_profile_id"} - scenario = clusters.CreateAndDeleteCluster(self.context) - scenario._create_cluster = mock.Mock(return_value=mock_cluster) - scenario._delete_cluster = mock.Mock() - - scenario.run(desired_capacity=1, min_size=0, - max_size=3, timeout=60, metadata={"k2": "v2"}) - - scenario._create_cluster.assert_called_once_with("fake_profile_id", - 1, 0, 3, 60, - {"k2": "v2"}) - scenario._delete_cluster.assert_called_once_with(mock_cluster) diff --git a/tests/unit/plugins/openstack/scenarios/senlin/test_utils.py b/tests/unit/plugins/openstack/scenarios/senlin/test_utils.py deleted file mode 100644 index 5ca9c86cc9..0000000000 --- a/tests/unit/plugins/openstack/scenarios/senlin/test_utils.py +++ /dev/null @@ -1,152 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.common import cfg -from rally import exceptions -from rally.plugins.openstack.scenarios.senlin import utils -from tests.unit import test - -SENLIN_UTILS = "rally.plugins.openstack.scenarios.senlin.utils." 
-CONF = cfg.CONF - - -class SenlinScenarioTestCase(test.ScenarioTestCase): - - def test_list_cluster(self): - fake_cluster_list = ["cluster1", "cluster2"] - self.admin_clients("senlin").clusters.return_value = fake_cluster_list - scenario = utils.SenlinScenario(self.context) - result = scenario._list_clusters() - - self.assertEqual(list(fake_cluster_list), result) - self.admin_clients("senlin").clusters.assert_called_once_with() - - def test_list_cluster_with_queries(self): - fake_cluster_list = ["cluster1", "cluster2"] - self.admin_clients("senlin").clusters.return_value = fake_cluster_list - scenario = utils.SenlinScenario(self.context) - result = scenario._list_clusters(status="ACTIVE") - - self.assertEqual(list(fake_cluster_list), result) - self.admin_clients("senlin").clusters.assert_called_once_with( - status="ACTIVE") - - @mock.patch(SENLIN_UTILS + "SenlinScenario.generate_random_name", - return_value="test_cluster") - def test_create_cluster(self, mock_generate_random_name): - fake_cluster = mock.Mock(id="fake_cluster_id") - res_cluster = mock.Mock() - self.admin_clients("senlin").create_cluster.return_value = fake_cluster - self.mock_wait_for_status.mock.return_value = res_cluster - scenario = utils.SenlinScenario(self.context) - result = scenario._create_cluster("fake_profile_id", - desired_capacity=1, - min_size=0, - max_size=3, - metadata={"k1": "v1"}, - timeout=60) - - self.assertEqual(res_cluster, result) - self.admin_clients("senlin").create_cluster.assert_called_once_with( - profile_id="fake_profile_id", name="test_cluster", - desired_capacity=1, min_size=0, max_size=3, metadata={"k1": "v1"}, - timeout=60) - self.mock_wait_for_status.mock.assert_called_once_with( - fake_cluster, ready_statuses=["ACTIVE"], - failure_statuses=["ERROR"], - update_resource=scenario._get_cluster, - timeout=CONF.openstack.senlin_action_timeout) - mock_generate_random_name.assert_called_once_with() - self._test_atomic_action_timer(scenario.atomic_actions(), - "senlin.create_cluster") - - def test_get_cluster(self): - fake_cluster = mock.Mock(id="fake_cluster_id") - scenario = utils.SenlinScenario(context=self.context) - scenario._get_cluster(fake_cluster) - - self.admin_clients("senlin").get_cluster.assert_called_once_with( - "fake_cluster_id") - - def test_get_cluster_notfound(self): - fake_cluster = mock.Mock(id="fake_cluster_id") - ex = Exception() - ex.code = 404 - self.admin_clients("senlin").get_cluster.side_effect = ex - scenario = utils.SenlinScenario(context=self.context) - - self.assertRaises(exceptions.GetResourceNotFound, - scenario._get_cluster, - fake_cluster) - self.admin_clients("senlin").get_cluster.assert_called_once_with( - "fake_cluster_id") - - def test_get_cluster_failed(self): - fake_cluster = mock.Mock(id="fake_cluster_id") - ex = Exception() - ex.code = 500 - self.admin_clients("senlin").get_cluster.side_effect = ex - scenario = utils.SenlinScenario(context=self.context) - - self.assertRaises(exceptions.GetResourceFailure, - scenario._get_cluster, - fake_cluster) - self.admin_clients("senlin").get_cluster.assert_called_once_with( - "fake_cluster_id") - - def test_delete_cluster(self): - fake_cluster = mock.Mock() - scenario = utils.SenlinScenario(context=self.context) - scenario._delete_cluster(fake_cluster) - - self.admin_clients("senlin").delete_cluster.assert_called_once_with( - fake_cluster) - self.mock_wait_for_status.mock.assert_called_once_with( - fake_cluster, ready_statuses=["DELETED"], - failure_statuses=["ERROR"], check_deletion=True, - 
update_resource=scenario._get_cluster, - timeout=CONF.openstack.senlin_action_timeout) - self._test_atomic_action_timer(scenario.atomic_actions(), - "senlin.delete_cluster") - - @mock.patch(SENLIN_UTILS + "SenlinScenario.generate_random_name", - return_value="test_profile") - def test_create_profile(self, mock_generate_random_name): - test_spec = { - "version": "1.0", - "type": "test_type", - "properties": { - "key1": "value1" - } - } - scenario = utils.SenlinScenario(self.context) - result = scenario._create_profile(test_spec, metadata={"k2": "v2"}) - - self.assertEqual( - self.clients("senlin").create_profile.return_value, result) - self.clients("senlin").create_profile.assert_called_once_with( - spec=test_spec, name="test_profile", metadata={"k2": "v2"}) - mock_generate_random_name.assert_called_once_with() - self._test_atomic_action_timer(scenario.atomic_actions(), - "senlin.create_profile") - - def test_delete_profile(self): - fake_profile = mock.Mock() - scenario = utils.SenlinScenario(context=self.context) - scenario._delete_profile(fake_profile) - - self.clients("senlin").delete_profile.assert_called_once_with( - fake_profile) - self._test_atomic_action_timer(scenario.atomic_actions(), - "senlin.delete_profile") diff --git a/tests/unit/plugins/openstack/scenarios/swift/__init__.py b/tests/unit/plugins/openstack/scenarios/swift/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/scenarios/swift/test_objects.py b/tests/unit/plugins/openstack/scenarios/swift/test_objects.py deleted file mode 100644 index 9072e76c7f..0000000000 --- a/tests/unit/plugins/openstack/scenarios/swift/test_objects.py +++ /dev/null @@ -1,145 +0,0 @@ -# Copyright 2015 Cisco Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ddt -import mock - -from rally.plugins.openstack.scenarios.swift import objects -from tests.unit import test - - -@ddt.ddt -class SwiftObjectsTestCase(test.ScenarioTestCase): - - def test_create_container_and_object_then_list_objects(self): - scenario = objects.CreateContainerAndObjectThenListObjects( - self.context) - scenario._create_container = mock.MagicMock(return_value="AA") - scenario._upload_object = mock.MagicMock() - scenario._list_objects = mock.MagicMock() - - scenario.run(objects_per_container=5, object_size=100) - - self.assertEqual(1, scenario._create_container.call_count) - self.assertEqual(5, scenario._upload_object.call_count) - scenario._list_objects.assert_called_once_with("AA") - - def test_create_container_and_object_then_delete_all(self): - scenario = objects.CreateContainerAndObjectThenDeleteAll(self.context) - scenario._create_container = mock.MagicMock(return_value="BB") - scenario._upload_object = mock.MagicMock( - side_effect=[("etaaag", "ooobj_%i" % i) for i in range(3)]) - scenario._delete_object = mock.MagicMock() - scenario._delete_container = mock.MagicMock() - - scenario.run(objects_per_container=3, object_size=10) - - self.assertEqual(1, scenario._create_container.call_count) - self.assertEqual(3, scenario._upload_object.call_count) - scenario._delete_object.assert_has_calls( - [mock.call("BB", "ooobj_%i" % i) for i in range(3)]) - scenario._delete_container.assert_called_once_with("BB") - - def test_create_container_and_object_then_download_object(self): - scenario = objects.CreateContainerAndObjectThenDownloadObject( - self.context - ) - scenario._create_container = mock.MagicMock(return_value="CC") - scenario._upload_object = mock.MagicMock( - side_effect=[("etaaaag", "obbbj_%i" % i) for i in range(2)]) - scenario._download_object = mock.MagicMock() - - scenario.run(objects_per_container=2, object_size=50) - - self.assertEqual(1, scenario._create_container.call_count) - self.assertEqual(2, scenario._upload_object.call_count) - scenario._download_object.assert_has_calls( - [mock.call("CC", "obbbj_%i" % i) for i in range(2)]) - - @ddt.data(1, 5) - def test_list_objects_in_containers(self, num_cons): - con_list = [{"name": "cooon_%s" % i} for i in range(num_cons)] - scenario = objects.ListObjectsInContainers(self.context) - scenario._list_containers = mock.MagicMock(return_value=("header", - con_list)) - scenario._list_objects = mock.MagicMock() - - scenario.run() - scenario._list_containers.assert_called_once_with() - con_calls = [mock.call(container["name"]) - for container in con_list] - scenario._list_objects.assert_has_calls(con_calls) - - @ddt.data([1, 1], [1, 2], [2, 1], [3, 5]) - @ddt.unpack - def test_list_and_download_objects_in_containers(self, num_cons, num_objs): - con_list = [{"name": "connn_%s" % i} for i in range(num_cons)] - obj_list = [{"name": "ooobj_%s" % i} for i in range(num_objs)] - scenario = objects.ListAndDownloadObjectsInContainers(self.context) - scenario._list_containers = mock.MagicMock(return_value=("header", - con_list)) - scenario._list_objects = mock.MagicMock(return_value=("header", - obj_list)) - scenario._download_object = mock.MagicMock() - - scenario.run() - scenario._list_containers.assert_called_once_with() - con_calls = [mock.call(container["name"]) - for container in con_list] - scenario._list_objects.assert_has_calls(con_calls) - obj_calls = [] - for container in con_list: - for obj in obj_list: - obj_calls.append(mock.call(container["name"], obj["name"])) - 
scenario._download_object.assert_has_calls(obj_calls, any_order=True) - - def test_functional_create_container_and_object_then_list_objects(self): - names_list = ["AA", "BB", "CC", "DD"] - - scenario = objects.CreateContainerAndObjectThenListObjects( - self.context) - scenario.generate_random_name = mock.MagicMock(side_effect=names_list) - scenario._list_objects = mock.MagicMock() - - scenario.run(objects_per_container=3, object_size=100) - - scenario._list_objects.assert_called_once_with("AA") - - def test_functional_create_container_and_object_then_delete_all(self): - names_list = ["111", "222", "333", "444", "555"] - - scenario = objects.CreateContainerAndObjectThenDeleteAll(self.context) - scenario.generate_random_name = mock.MagicMock(side_effect=names_list) - scenario._delete_object = mock.MagicMock() - scenario._delete_container = mock.MagicMock() - - scenario.run(objects_per_container=4, object_size=240) - - scenario._delete_object.assert_has_calls( - [mock.call("111", name) for name in names_list[1:]]) - scenario._delete_container.assert_called_once_with("111") - - def test_functional_create_container_and_object_then_download_object(self): - names_list = ["aaa", "bbb", "ccc", "ddd", "eee", "fff"] - - scenario = objects.CreateContainerAndObjectThenDownloadObject( - self.context) - scenario.generate_random_name = mock.MagicMock(side_effect=names_list) - scenario._download_object = mock.MagicMock() - - scenario.run(objects_per_container=5, object_size=750) - - scenario._download_object.assert_has_calls( - [mock.call("aaa", name) for name in names_list[1:]]) diff --git a/tests/unit/plugins/openstack/scenarios/swift/test_utils.py b/tests/unit/plugins/openstack/scenarios/swift/test_utils.py deleted file mode 100644 index 500ddab508..0000000000 --- a/tests/unit/plugins/openstack/scenarios/swift/test_utils.py +++ /dev/null @@ -1,154 +0,0 @@ -# Copyright 2015: Cisco Systems, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ddt -import mock - -from rally.plugins.openstack.scenarios.swift import utils -from tests.unit import test - -SWIFT_UTILS = "rally.plugins.openstack.scenarios.swift.utils" - - -@ddt.ddt -class SwiftScenarioTestCase(test.ScenarioTestCase): - - def test__list_containers(self): - headers_dict = mock.MagicMock() - containers_list = mock.MagicMock() - self.clients("swift").get_account.return_value = (headers_dict, - containers_list) - scenario = utils.SwiftScenario(context=self.context) - - self.assertEqual((headers_dict, containers_list), - scenario._list_containers(fargs="f")) - kw = {"full_listing": True, "fargs": "f"} - self.clients("swift").get_account.assert_called_once_with(**kw) - - self._test_atomic_action_timer(scenario.atomic_actions(), - "swift.list_containers") - - @ddt.data( - {}, - {"headers": {"X-fake-name": "fake-value"}}, - {"public": False, - "headers": {"X-fake-name": "fake-value"}}, - {"public": False}) - @ddt.unpack - def test__create_container(self, public=True, kwargs=None, headers=None): - if kwargs is None: - kwargs = {"fakearg": "fake"} - if headers is None: - headers = {} - scenario = utils.SwiftScenario(self.context) - scenario.generate_random_name = mock.MagicMock() - - container = scenario._create_container(public=public, - headers=headers, - **kwargs) - self.assertEqual(container, - scenario.generate_random_name.return_value) - kwargs["headers"] = headers - kwargs["headers"]["X-Container-Read"] = ".r:*,.rlistings" - self.clients("swift").put_container.assert_called_once_with( - scenario.generate_random_name.return_value, - **kwargs) - - self._test_atomic_action_timer(scenario.atomic_actions(), - "swift.create_container") - - def test__delete_container(self): - container_name = mock.MagicMock() - scenario = utils.SwiftScenario(context=self.context) - scenario._delete_container(container_name, fargs="f") - - kw = {"fargs": "f"} - self.clients("swift").delete_container.assert_called_once_with( - container_name, - **kw) - - self._test_atomic_action_timer(scenario.atomic_actions(), - "swift.delete_container") - - def test__list_objects(self): - container_name = mock.MagicMock() - headers_dict = mock.MagicMock() - objects_list = mock.MagicMock() - self.clients("swift").get_container.return_value = (headers_dict, - objects_list) - scenario = utils.SwiftScenario(context=self.context) - - self.assertEqual((headers_dict, objects_list), - scenario._list_objects(container_name, fargs="f")) - kw = {"full_listing": True, "fargs": "f"} - self.clients("swift").get_container.assert_called_once_with( - container_name, - **kw) - - self._test_atomic_action_timer(scenario.atomic_actions(), - "swift.list_objects") - - def test__upload_object(self): - container_name = mock.MagicMock() - content = mock.MagicMock() - etag = mock.MagicMock() - self.clients("swift").put_object.return_value = etag - scenario = utils.SwiftScenario(self.context) - scenario.generate_random_name = mock.MagicMock() - - self.clients("swift").put_object.reset_mock() - self.assertEqual((etag, scenario.generate_random_name.return_value), - scenario._upload_object(container_name, content, - fargs="f")) - kw = {"fargs": "f"} - self.clients("swift").put_object.assert_called_once_with( - container_name, scenario.generate_random_name.return_value, - content, **kw) - self.assertEqual(1, scenario.generate_random_name.call_count) - - self._test_atomic_action_timer(scenario.atomic_actions(), - "swift.upload_object") - - def test__download_object(self): - container_name = mock.MagicMock() - object_name = 
mock.MagicMock() - headers_dict = mock.MagicMock() - content = mock.MagicMock() - self.clients("swift").get_object.return_value = (headers_dict, content) - scenario = utils.SwiftScenario(context=self.context) - - self.assertEqual((headers_dict, content), - scenario._download_object(container_name, object_name, - fargs="f")) - kw = {"fargs": "f"} - self.clients("swift").get_object.assert_called_once_with( - container_name, object_name, - **kw) - - self._test_atomic_action_timer(scenario.atomic_actions(), - "swift.download_object") - - def test__delete_object(self): - container_name = mock.MagicMock() - object_name = mock.MagicMock() - scenario = utils.SwiftScenario(context=self.context) - scenario._delete_object(container_name, object_name, fargs="f") - - kw = {"fargs": "f"} - self.clients("swift").delete_object.assert_called_once_with( - container_name, object_name, - **kw) - self._test_atomic_action_timer(scenario.atomic_actions(), - "swift.delete_object") diff --git a/tests/unit/plugins/openstack/scenarios/vm/__init__.py b/tests/unit/plugins/openstack/scenarios/vm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/scenarios/vm/test_utils.py b/tests/unit/plugins/openstack/scenarios/vm/test_utils.py deleted file mode 100644 index 75a8b257da..0000000000 --- a/tests/unit/plugins/openstack/scenarios/vm/test_utils.py +++ /dev/null @@ -1,307 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -import subprocess - -import mock -import netaddr - -from rally.common import cfg -from rally.plugins.openstack.scenarios.vm import utils -from tests.unit import test - -VMTASKS_UTILS = "rally.plugins.openstack.scenarios.vm.utils" -CONF = cfg.CONF - - -class VMScenarioTestCase(test.ScenarioTestCase): - - @mock.patch("%s.open" % VMTASKS_UTILS, - side_effect=mock.mock_open(), create=True) - def test__run_command_over_ssh_script_file(self, mock_open): - mock_ssh = mock.MagicMock() - vm_scenario = utils.VMScenario(self.context) - vm_scenario._run_command_over_ssh( - mock_ssh, - { - "script_file": "foobar", - "interpreter": ["interpreter", "interpreter_arg"], - "command_args": ["arg1", "arg2"] - } - ) - mock_ssh.execute.assert_called_once_with( - ["interpreter", "interpreter_arg", "arg1", "arg2"], - stdin=mock_open.side_effect()) - mock_open.assert_called_once_with("foobar", "rb") - - @mock.patch("%s.six.moves.StringIO" % VMTASKS_UTILS) - def test__run_command_over_ssh_script_inline(self, mock_string_io): - mock_ssh = mock.MagicMock() - vm_scenario = utils.VMScenario(self.context) - vm_scenario._run_command_over_ssh( - mock_ssh, - { - "script_inline": "foobar", - "interpreter": ["interpreter", "interpreter_arg"], - "command_args": ["arg1", "arg2"] - } - ) - mock_ssh.execute.assert_called_once_with( - ["interpreter", "interpreter_arg", "arg1", "arg2"], - stdin=mock_string_io.return_value) - mock_string_io.assert_called_once_with("foobar") - - def test__run_command_over_ssh_remote_path(self): - mock_ssh = mock.MagicMock() - vm_scenario = utils.VMScenario(self.context) - vm_scenario._run_command_over_ssh( - mock_ssh, - { - "remote_path": ["foo", "bar"], - "command_args": ["arg1", "arg2"] - } - ) - mock_ssh.execute.assert_called_once_with( - ["foo", "bar", "arg1", "arg2"], - stdin=None) - - def test__run_command_over_ssh_remote_path_copy(self): - mock_ssh = mock.MagicMock() - vm_scenario = utils.VMScenario(self.context) - vm_scenario._run_command_over_ssh( - mock_ssh, - { - "remote_path": ["foo", "bar"], - "local_path": "/bin/false", - "command_args": ["arg1", "arg2"] - } - ) - mock_ssh.put_file.assert_called_once_with( - "/bin/false", "bar", mode=0o755 - ) - mock_ssh.execute.assert_called_once_with( - ["foo", "bar", "arg1", "arg2"], - stdin=None) - - def test__wait_for_ssh(self): - ssh = mock.MagicMock() - vm_scenario = utils.VMScenario(self.context) - vm_scenario._wait_for_ssh(ssh) - ssh.wait.assert_called_once_with(120, 1) - - def test__wait_for_ping(self): - vm_scenario = utils.VMScenario(self.context) - vm_scenario._ping_ip_address = mock.Mock(return_value=True) - vm_scenario._wait_for_ping(netaddr.IPAddress("1.2.3.4")) - self.mock_wait_for_status.mock.assert_called_once_with( - utils.Host("1.2.3.4"), - ready_statuses=[utils.Host.ICMP_UP_STATUS], - update_resource=utils.Host.update_status, - timeout=CONF.openstack.vm_ping_timeout, - check_interval=CONF.openstack.vm_ping_poll_interval) - - @mock.patch(VMTASKS_UTILS + ".VMScenario._run_command_over_ssh") - @mock.patch("rally.common.sshutils.SSH") - def test__run_command(self, mock_sshutils_ssh, - mock_vm_scenario__run_command_over_ssh): - vm_scenario = utils.VMScenario(self.context) - vm_scenario.context = {"user": {"keypair": {"private": "ssh"}}} - vm_scenario._run_command("1.2.3.4", 22, "username", "password", - command={"script_file": "foo", - "interpreter": "bar"}) - - mock_sshutils_ssh.assert_called_once_with( - "username", "1.2.3.4", - port=22, pkey="ssh", password="password") - 
mock_sshutils_ssh.return_value.wait.assert_called_once_with(120, 1) - mock_vm_scenario__run_command_over_ssh.assert_called_once_with( - mock_sshutils_ssh.return_value, - {"script_file": "foo", "interpreter": "bar"}) - - def get_scenario(self): - server = mock.Mock( - networks={"foo_net": "foo_data"}, - addresses={"foo_net": [{"addr": "foo_ip"}]}, - tenant_id="foo_tenant" - ) - scenario = utils.VMScenario(self.context) - - scenario._boot_server = mock.Mock(return_value=server) - scenario._delete_server = mock.Mock() - scenario._associate_floating_ip = mock.Mock() - scenario._wait_for_ping = mock.Mock() - - return scenario, server - - def test__boot_server_with_fip_without_networks(self): - scenario, server = self.get_scenario() - server.networks = {} - self.assertRaises(RuntimeError, - scenario._boot_server_with_fip, - "foo_image", "foo_flavor", foo_arg="foo_value") - scenario._boot_server.assert_called_once_with( - "foo_image", "foo_flavor", - foo_arg="foo_value", auto_assign_nic=True) - - def test__boot_server_with_fixed_ip(self): - scenario, server = self.get_scenario() - scenario._attach_floating_ip = mock.Mock() - server, ip = scenario._boot_server_with_fip( - "foo_image", "foo_flavor", floating_network="ext_network", - use_floating_ip=False, foo_arg="foo_value") - - self.assertEqual(ip, {"ip": "foo_ip", "id": None, - "is_floating": False}) - scenario._boot_server.assert_called_once_with( - "foo_image", "foo_flavor", - auto_assign_nic=True, foo_arg="foo_value") - self.assertEqual(scenario._attach_floating_ip.mock_calls, []) - - def test__boot_server_with_fip(self): - scenario, server = self.get_scenario() - scenario._attach_floating_ip = mock.Mock( - return_value={"id": "foo_id", "ip": "foo_ip"}) - server, ip = scenario._boot_server_with_fip( - "foo_image", "foo_flavor", floating_network="ext_network", - use_floating_ip=True, foo_arg="foo_value") - self.assertEqual(ip, {"ip": "foo_ip", "id": "foo_id", - "is_floating": True}) - - scenario._boot_server.assert_called_once_with( - "foo_image", "foo_flavor", - auto_assign_nic=True, foo_arg="foo_value") - scenario._attach_floating_ip.assert_called_once_with( - server, "ext_network") - - def test__delete_server_with_fixed_ip(self): - ip = {"ip": "foo_ip", "id": None, "is_floating": False} - scenario, server = self.get_scenario() - scenario._delete_floating_ip = mock.Mock() - scenario._delete_server_with_fip(server, ip, force_delete=True) - - self.assertEqual(scenario._delete_floating_ip.mock_calls, []) - scenario._delete_server.assert_called_once_with(server, force=True) - - def test__delete_server_with_fip(self): - fip = {"ip": "foo_ip", "id": "foo_id", "is_floating": True} - scenario, server = self.get_scenario() - scenario._delete_floating_ip = mock.Mock() - scenario._delete_server_with_fip(server, fip, force_delete=True) - - scenario._delete_floating_ip.assert_called_once_with(server, fip) - scenario._delete_server.assert_called_once_with(server, force=True) - - @mock.patch(VMTASKS_UTILS + ".network_wrapper.wrap") - def test__attach_floating_ip(self, mock_wrap): - scenario, server = self.get_scenario() - - netwrap = mock_wrap.return_value - fip = {"id": "foo_id", "ip": "foo_ip"} - netwrap.create_floating_ip.return_value = fip - - scenario._attach_floating_ip( - server, floating_network="bar_network") - - mock_wrap.assert_called_once_with(scenario.clients, scenario) - netwrap.create_floating_ip.assert_called_once_with( - ext_network="bar_network", - tenant_id="foo_tenant", fixed_ip="foo_ip") - - 
scenario._associate_floating_ip.assert_called_once_with( - server, fip, fixed_address="foo_ip") - - @mock.patch(VMTASKS_UTILS + ".network_wrapper.wrap") - def test__delete_floating_ip(self, mock_wrap): - scenario, server = self.get_scenario() - - _check_addr = mock.Mock(return_value=True) - scenario.check_ip_address = mock.Mock(return_value=_check_addr) - scenario._dissociate_floating_ip = mock.Mock() - - fip = {"id": "foo_id", "ip": "foo_ip"} - scenario._delete_floating_ip(server, fip=fip) - - scenario.check_ip_address.assert_called_once_with( - "foo_ip") - _check_addr.assert_called_once_with(server) - scenario._dissociate_floating_ip.assert_called_once_with( - server, fip) - mock_wrap.assert_called_once_with(scenario.clients, scenario) - mock_wrap.return_value.delete_floating_ip.assert_called_once_with( - "foo_id", wait=True) - - -class HostTestCase(test.TestCase): - - @mock.patch(VMTASKS_UTILS + ".sys") - @mock.patch("subprocess.Popen") - def test__ping_ip_address_linux(self, mock_popen, mock_sys): - mock_popen.return_value.returncode = 0 - mock_sys.platform = "linux2" - - host = utils.Host("1.2.3.4") - self.assertEqual(utils.Host.ICMP_UP_STATUS, - utils.Host.update_status(host).status) - - mock_popen.assert_called_once_with( - ["ping", "-c1", "-w1", str(host.ip)], - stderr=subprocess.PIPE, stdout=subprocess.PIPE) - mock_popen.return_value.wait.assert_called_once_with() - - @mock.patch(VMTASKS_UTILS + ".sys") - @mock.patch("subprocess.Popen") - def test__ping_ip_address_linux_ipv6(self, mock_popen, mock_sys): - mock_popen.return_value.returncode = 0 - mock_sys.platform = "linux2" - - host = utils.Host("1ce:c01d:bee2:15:a5:900d:a5:11fe") - self.assertEqual(utils.Host.ICMP_UP_STATUS, - utils.Host.update_status(host).status) - - mock_popen.assert_called_once_with( - ["ping6", "-c1", "-w1", str(host.ip)], - stderr=subprocess.PIPE, stdout=subprocess.PIPE) - mock_popen.return_value.wait.assert_called_once_with() - - @mock.patch(VMTASKS_UTILS + ".sys") - @mock.patch("subprocess.Popen") - def test__ping_ip_address_other_os(self, mock_popen, mock_sys): - mock_popen.return_value.returncode = 0 - mock_sys.platform = "freebsd10" - - host = utils.Host("1.2.3.4") - self.assertEqual(utils.Host.ICMP_UP_STATUS, - utils.Host.update_status(host).status) - - mock_popen.assert_called_once_with( - ["ping", "-c1", str(host.ip)], - stderr=subprocess.PIPE, stdout=subprocess.PIPE) - mock_popen.return_value.wait.assert_called_once_with() - - @mock.patch(VMTASKS_UTILS + ".sys") - @mock.patch("subprocess.Popen") - def test__ping_ip_address_other_os_ipv6(self, mock_popen, mock_sys): - mock_popen.return_value.returncode = 0 - mock_sys.platform = "freebsd10" - - host = utils.Host("1ce:c01d:bee2:15:a5:900d:a5:11fe") - self.assertEqual(utils.Host.ICMP_UP_STATUS, - utils.Host.update_status(host).status) - - mock_popen.assert_called_once_with( - ["ping6", "-c1", str(host.ip)], - stderr=subprocess.PIPE, stdout=subprocess.PIPE) - mock_popen.return_value.wait.assert_called_once_with() diff --git a/tests/unit/plugins/openstack/scenarios/vm/test_vmtasks.py b/tests/unit/plugins/openstack/scenarios/vm/test_vmtasks.py deleted file mode 100644 index d93f417a60..0000000000 --- a/tests/unit/plugins/openstack/scenarios/vm/test_vmtasks.py +++ /dev/null @@ -1,422 +0,0 @@ -# Copyright 2013: Rackspace UK -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -import ddt -import mock - -from rally.common import validation -from rally import exceptions -from rally.plugins.openstack.scenarios.vm import vmtasks -from tests.unit import test - - -BASE = "rally.plugins.openstack.scenarios.vm.vmtasks" - - -@ddt.ddt -class VMTasksTestCase(test.ScenarioTestCase): - - def setUp(self): - super(VMTasksTestCase, self).setUp() - self.context.update({"user": {"keypair": {"name": "keypair_name"}, - "credential": mock.MagicMock()}}) - - cinder_patcher = mock.patch( - "rally.plugins.openstack.services.storage.block.BlockStorage") - self.cinder = cinder_patcher.start().return_value - self.cinder.create_volume.return_value = mock.Mock(id="foo_volume") - self.addCleanup(cinder_patcher.stop) - - def create_env(self, scenario): - self.ip = {"id": "foo_id", "ip": "foo_ip", "is_floating": True} - scenario._boot_server_with_fip = mock.Mock( - return_value=("foo_server", self.ip)) - scenario._wait_for_ping = mock.Mock() - scenario._delete_server_with_fip = mock.Mock() - scenario._run_command = mock.MagicMock( - return_value=(0, "{\"foo\": 42}", "foo_err")) - scenario.add_output = mock.Mock() - return scenario - - def test_boot_runcommand_delete(self): - scenario = self.create_env(vmtasks.BootRuncommandDelete(self.context)) - scenario._run_command = mock.MagicMock( - return_value=(0, "{\"foo\": 42}", "foo_err")) - scenario.run("foo_flavor", image="foo_image", - command={"script_file": "foo_script", - "interpreter": "foo_interpreter"}, - username="foo_username", - password="foo_password", - use_floating_ip="use_fip", - floating_network="ext_network", - force_delete="foo_force", - volume_args={"size": 16}, - foo_arg="foo_value") - - self.cinder.create_volume.assert_called_once_with(16, imageRef=None) - scenario._boot_server_with_fip.assert_called_once_with( - "foo_image", "foo_flavor", key_name="keypair_name", - use_floating_ip="use_fip", floating_network="ext_network", - block_device_mapping={"vdrally": "foo_volume:::1"}, - foo_arg="foo_value") - - scenario._wait_for_ping.assert_called_once_with("foo_ip") - scenario._run_command.assert_called_once_with( - "foo_ip", 22, "foo_username", "foo_password", - command={"script_file": "foo_script", - "interpreter": "foo_interpreter"}) - scenario._delete_server_with_fip.assert_called_once_with( - "foo_server", self.ip, force_delete="foo_force") - scenario.add_output.assert_called_once_with( - complete={"chart_plugin": "TextArea", - "data": [ - "StdErr: foo_err", - "StdOut:", - "{\"foo\": 42}"], - "title": "Script Output"}) - - @ddt.data( - {"output": (0, "", ""), - "expected": [{"complete": {"chart_plugin": "TextArea", - "data": [ - "StdErr: (none)", - "StdOut:", - ""], - "title": "Script Output"}}]}, - {"output": (1, "{\"foo\": 42}", ""), "raises": exceptions.ScriptError}, - {"output": ("", 1, ""), "raises": TypeError}, - {"output": (0, "{\"foo\": 42}", ""), - "expected": [{"complete": {"chart_plugin": "TextArea", - "data": [ - "StdErr: (none)", - "StdOut:", - "{\"foo\": 42}"], - "title": "Script Output"}}]}, - {"output": (0, "{\"additive\": [1, 2]}", ""), - "expected": [{"complete": {"chart_plugin": 
"TextArea", - "data": [ - "StdErr: (none)", - "StdOut:", "{\"additive\": [1, 2]}"], - "title": "Script Output"}}]}, - {"output": (0, "{\"complete\": [3, 4]}", ""), - "expected": [{"complete": {"chart_plugin": "TextArea", - "data": [ - "StdErr: (none)", - "StdOut:", - "{\"complete\": [3, 4]}"], - "title": "Script Output"}}]}, - {"output": (0, "{\"additive\": [1, 2], \"complete\": [3, 4]}", ""), - "expected": [{"additive": 1}, {"additive": 2}, - {"complete": 3}, {"complete": 4}]} - ) - @ddt.unpack - def test_boot_runcommand_delete_add_output(self, output, - expected=None, raises=None): - scenario = self.create_env(vmtasks.BootRuncommandDelete(self.context)) - - scenario._run_command.return_value = output - kwargs = {"flavor": "foo_flavor", - "image": "foo_image", - "command": {"remote_path": "foo"}, - "username": "foo_username", - "password": "foo_password", - "use_floating_ip": "use_fip", - "floating_network": "ext_network", - "force_delete": "foo_force", - "volume_args": {"size": 16}, - "foo_arg": "foo_value"} - if raises: - self.assertRaises(raises, scenario.run, **kwargs) - self.assertFalse(scenario.add_output.called) - else: - scenario.run(**kwargs) - calls = [mock.call(**kw) for kw in expected] - scenario.add_output.assert_has_calls(calls, any_order=True) - - self.cinder.create_volume.assert_called_once_with( - 16, imageRef=None) - scenario._boot_server_with_fip.assert_called_once_with( - "foo_image", "foo_flavor", key_name="keypair_name", - use_floating_ip="use_fip", floating_network="ext_network", - block_device_mapping={"vdrally": "foo_volume:::1"}, - foo_arg="foo_value") - - scenario._run_command.assert_called_once_with( - "foo_ip", 22, "foo_username", "foo_password", - command={"remote_path": "foo"}) - scenario._delete_server_with_fip.assert_called_once_with( - "foo_server", self.ip, force_delete="foo_force") - - def test_boot_runcommand_delete_command_timeouts(self): - scenario = self.create_env(vmtasks.BootRuncommandDelete(self.context)) - - scenario._run_command.side_effect = exceptions.SSHTimeout() - self.assertRaises(exceptions.SSHTimeout, - scenario.run, - "foo_flavor", "foo_image", "foo_interpreter", - "foo_script", "foo_username") - scenario._delete_server_with_fip.assert_called_once_with( - "foo_server", self.ip, force_delete=False) - self.assertFalse(scenario.add_output.called) - - def test_boot_runcommand_delete_ping_wait_timeouts(self): - scenario = self.create_env(vmtasks.BootRuncommandDelete(self.context)) - - scenario._wait_for_ping.side_effect = exceptions.TimeoutException( - resource_type="foo_resource", - resource_name="foo_name", - resource_id="foo_id", - desired_status="foo_desired_status", - resource_status="foo_resource_status", - timeout=2) - exc = self.assertRaises(exceptions.TimeoutException, - scenario.run, - "foo_image", "foo_flavor", "foo_interpreter", - "foo_script", "foo_username", - wait_for_ping=True) - self.assertEqual(exc.kwargs["resource_type"], "foo_resource") - self.assertEqual(exc.kwargs["resource_name"], "foo_name") - self.assertEqual(exc.kwargs["resource_id"], "foo_id") - self.assertEqual(exc.kwargs["desired_status"], "foo_desired_status") - self.assertEqual(exc.kwargs["resource_status"], "foo_resource_status") - - scenario._delete_server_with_fip.assert_called_once_with( - "foo_server", self.ip, force_delete=False) - self.assertFalse(scenario.add_output.called) - - @mock.patch("%s.json" % BASE) - def test_boot_runcommand_delete_json_fails(self, mock_json): - scenario = self.create_env(vmtasks.BootRuncommandDelete(self.context)) - - 
mock_json.loads.side_effect = ValueError() - scenario.run("foo_image", "foo_flavor", "foo_interpreter", - "foo_script", "foo_username") - scenario.add_output.assert_called_once_with(complete={ - "chart_plugin": "TextArea", "data": ["StdErr: foo_err", - "StdOut:", "{\"foo\": 42}"], - "title": "Script Output"}) - scenario._delete_server_with_fip.assert_called_once_with( - "foo_server", self.ip, force_delete=False) - - def test_boot_runcommand_delete_custom_image(self): - context = { - "user": { - "tenant_id": "tenant_id", - "keypair": {"name": "foo_keypair_name"}, - "credential": mock.Mock() - }, - "tenant": { - "custom_image": {"id": "image_id"} - } - } - - scenario = self.create_env(vmtasks.BootRuncommandDelete(context)) - scenario._run_command = mock.MagicMock( - return_value=(0, "{\"foo\": 42}", "foo_err")) - scenario.run("foo_flavor", - command={"script_file": "foo_script", - "interpreter": "foo_interpreter"}, - username="foo_username", - password="foo_password", - use_floating_ip="use_fip", - floating_network="ext_network", - force_delete="foo_force", - volume_args={"size": 16}, - foo_arg="foo_value") - - self.cinder.create_volume.assert_called_once_with(16, imageRef=None) - scenario._boot_server_with_fip.assert_called_once_with( - "image_id", "foo_flavor", key_name="foo_keypair_name", - use_floating_ip="use_fip", floating_network="ext_network", - block_device_mapping={"vdrally": "foo_volume:::1"}, - foo_arg="foo_value") - - scenario._wait_for_ping.assert_called_once_with("foo_ip") - scenario._run_command.assert_called_once_with( - "foo_ip", 22, "foo_username", "foo_password", - command={"script_file": "foo_script", - "interpreter": "foo_interpreter"}) - scenario._delete_server_with_fip.assert_called_once_with( - "foo_server", self.ip, force_delete="foo_force") - scenario.add_output.assert_called_once_with( - complete={"chart_plugin": "TextArea", - "data": [ - "StdErr: foo_err", - "StdOut:", "{\"foo\": 42}"], - "title": "Script Output"}) - - @mock.patch("%s.heat" % BASE) - @mock.patch("%s.sshutils" % BASE) - def test_runcommand_heat(self, mock_sshutils, mock_heat): - fake_ssh = mock.Mock() - fake_ssh.execute.return_value = [0, "key:val", ""] - mock_sshutils.SSH.return_value = fake_ssh - fake_stack = mock.Mock() - fake_stack.stack.outputs = [{"output_key": "gate_node", - "output_value": "ok"}] - mock_heat.main.Stack.return_value = fake_stack - context = { - "user": {"keypair": {"name": "name", "private": "pk"}, - "credential": mock.MagicMock()}, - "tenant": {"networks": [{"router_id": "1"}]} - } - scenario = vmtasks.RuncommandHeat(context) - scenario.generate_random_name = mock.Mock(return_value="name") - scenario.add_output = mock.Mock() - workload = {"username": "admin", - "resource": ["foo", "bar"]} - scenario.run(workload, "template", - {"file_key": "file_value"}, - {"param_key": "param_value"}) - expected = {"chart_plugin": "Table", - "data": {"rows": [["key", "val"]], - "cols": ["key", "value"]}, - "description": "Data generated by workload", - "title": "Workload summary"} - scenario.add_output.assert_called_once_with(complete=expected) - - -@ddt.ddt -class ValidCommandValidatorTestCase(test.TestCase): - - def setUp(self): - super(ValidCommandValidatorTestCase, self).setUp() - self.context = {"admin": {"credential": mock.MagicMock()}, - "users": [{"credential": mock.MagicMock()}]} - - @ddt.data({"command": {"script_inline": "foobar", - "interpreter": ["ENV=bar", "/bin/foo"], - "local_path": "bar", - "remote_path": "/bin/foo"}}, - {"command": {"script_inline": "foobar", 
"interpreter": "foo"}}) - @ddt.unpack - def test_check_command_dict(self, command=None): - validator = vmtasks.ValidCommandValidator(param_name="p", - required=True) - self.assertIsNone(validator.check_command_dict(command)) - - @ddt.data({"raises_message": "Command must be a dictionary"}, - {"command": "foo", - "raises_message": "Command must be a dictionary"}, - {"command": {"interpreter": "foobar", "script_file": "foo", - "script_inline": "bar"}, - "raises_message": "Exactly one of "}, - {"command": {"script_file": "foobar"}, - "raises_message": "Supplied dict specifies no"}, - {"command": {"script_inline": "foobar", - "interpreter": "foo", - "local_path": "bar"}, - "raises_message": "When uploading an interpreter its path"}, - {"command": {"interpreter": "/bin/bash", - "script_path": "foo"}, - "raises_message": ("Unexpected command parameters: " - "script_path")}) - @ddt.unpack - def test_check_command_dict_failed( - self, command=None, raises_message=None): - validator = vmtasks.ValidCommandValidator(param_name="p", - required=True) - e = self.assertRaises( - validation.ValidationError, - validator.check_command_dict, command) - self.assertIn(raises_message, e.message) - - @mock.patch("rally.plugins.common.validators.FileExistsValidator" - "._file_access_ok") - def test_validate(self, mock__file_access_ok): - validator = vmtasks.ValidCommandValidator(param_name="p", - required=True) - mock__file_access_ok.return_value = None - command = {"script_file": "foobar", "interpreter": "foo"} - result = validator.validate(self.context, {"args": {"p": command}}, - None, None) - self.assertIsNone(result) - mock__file_access_ok.assert_called_once_with( - filename="foobar", mode=os.R_OK, param_name="p", - required=True) - - def test_valid_command_not_required(self): - validator = vmtasks.ValidCommandValidator(param_name="p", - required=False) - result = validator.validate(self.context, {"args": {"p": None}}, - None, None) - self.assertIsNone(result) - - def test_valid_command_required(self): - validator = vmtasks.ValidCommandValidator(param_name="p", - required=True) - - e = self.assertRaises( - validation.ValidationError, - validator.validate, {"args": {"p": None}}, - self.context, None, None) - self.assertEqual("Command must be a dictionary", e.message) - - @mock.patch("rally.plugins.common.validators.FileExistsValidator" - "._file_access_ok") - def test_valid_command_unreadable_script_file(self, mock__file_access_ok): - mock__file_access_ok.side_effect = validation.ValidationError("O_o") - - validator = vmtasks.ValidCommandValidator(param_name="p", - required=True) - - command = {"script_file": "foobar", "interpreter": "foo"} - e = self.assertRaises( - validation.ValidationError, - validator.validate, self.context, {"args": {"p": command}}, - None, None) - self.assertEqual("O_o", e.message) - - @mock.patch("%s.ValidCommandValidator.check_command_dict" % BASE) - def test_valid_command_fail_check_command_dict(self, - mock_check_command_dict): - validator = vmtasks.ValidCommandValidator(param_name="p", - required=True) - - mock_check_command_dict.side_effect = validation.ValidationError( - "foobar") - e = self.assertRaises( - validation.ValidationError, - validator.validate, {"args": {"p": {"foo": "bar"}}}, - self.context, None, None) - self.assertEqual("foobar", e.message) - - def test_valid_command_script_inline(self): - validator = vmtasks.ValidCommandValidator(param_name="p", - required=True) - - command = {"script_inline": "bar", "interpreter": "/bin/sh"} - result = 
validator.validate(self.context, {"args": {"p": command}}, - None, None) - self.assertIsNone(result) - - @mock.patch("rally.plugins.common.validators.FileExistsValidator" - "._file_access_ok") - def test_valid_command_local_path(self, mock__file_access_ok): - mock__file_access_ok.side_effect = validation.ValidationError("") - - validator = vmtasks.ValidCommandValidator(param_name="p", - required=True) - - command = {"remote_path": "bar", "local_path": "foobar"} - self.assertRaises( - validation.ValidationError, - validator.validate, self.context, {"args": {"p": command}}, - None, None) - mock__file_access_ok.assert_called_once_with( - filename="foobar", mode=os.R_OK, param_name="p", - required=True) diff --git a/tests/unit/plugins/openstack/scenarios/watcher/__init__.py b/tests/unit/plugins/openstack/scenarios/watcher/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/scenarios/watcher/test_basic.py b/tests/unit/plugins/openstack/scenarios/watcher/test_basic.py deleted file mode 100644 index a83b6dfb6c..0000000000 --- a/tests/unit/plugins/openstack/scenarios/watcher/test_basic.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2016: Servionica LTD. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.plugins.openstack.scenarios.watcher import basic -from tests.unit import test - - -class WatcherTestCase(test.ScenarioTestCase): - - def test_create_audit_template_and_delete(self): - scenario = basic.CreateAuditTemplateAndDelete(self.context) - audit_template = mock.Mock() - scenario._create_audit_template = mock.MagicMock( - return_value=audit_template) - scenario._delete_audit_template = mock.MagicMock() - scenario.run("goal", "strategy") - scenario._create_audit_template.assert_called_once_with("goal", - "strategy") - scenario._delete_audit_template.assert_called_once_with( - audit_template.uuid) - - def test_list_audit_template(self): - scenario = basic.ListAuditTemplates(self.context) - scenario._list_audit_templates = mock.MagicMock() - scenario.run() - scenario._list_audit_templates.assert_called_once_with( - detail=False, goal=None, limit=None, name=None, sort_dir=None, - sort_key=None, strategy=None) - - def test_create_audit_and_delete(self): - mock_audit = mock.MagicMock() - scenario = basic.CreateAuditAndDelete(self.context) - scenario.context = mock.MagicMock() - scenario._create_audit = mock.MagicMock(return_value=mock_audit) - scenario.sleep_between = mock.MagicMock() - scenario._delete_audit = mock.MagicMock() - scenario.run() - scenario._create_audit.assert_called_once_with(mock.ANY) - scenario._delete_audit.assert_called_once_with(mock_audit) diff --git a/tests/unit/plugins/openstack/scenarios/watcher/test_utils.py b/tests/unit/plugins/openstack/scenarios/watcher/test_utils.py deleted file mode 100644 index 6e3dc3b3bd..0000000000 --- a/tests/unit/plugins/openstack/scenarios/watcher/test_utils.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright 2016: Servionica LTD. 
-# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.common import cfg -from rally.plugins.openstack.scenarios.watcher import utils -from tests.unit import test - -CONF = cfg.CONF - - -class WatcherScenarioTestCase(test.ScenarioTestCase): - - def test_create_audit_template(self): - watcher_scenario = utils.WatcherScenario(self.context) - watcher_scenario.generate_random_name = mock.MagicMock( - return_value="mock_name") - watcher_scenario._create_audit_template("fake_goal", "fake_strategy") - self.admin_clients( - "watcher").audit_template.create.assert_called_once_with( - goal="fake_goal", strategy="fake_strategy", - name="mock_name") - self._test_atomic_action_timer(watcher_scenario.atomic_actions(), - "watcher.create_audit_template") - - def test_list_audit_templates(self): - audit_templates_list = [] - watcher_scenario = utils.WatcherScenario(self.context) - self.admin_clients( - "watcher").audit_template.list.return_value = audit_templates_list - return_audit_templates_list = watcher_scenario._list_audit_templates() - self.assertEqual(audit_templates_list, return_audit_templates_list) - self._test_atomic_action_timer(watcher_scenario.atomic_actions(), - "watcher.list_audit_templates") - - def test_delete_audit_template(self): - watcher_scenario = utils.WatcherScenario(self.context) - watcher_scenario._delete_audit_template("fake_audit_template") - self.admin_clients( - "watcher").audit_template.delete.assert_called_once_with( - "fake_audit_template") - self._test_atomic_action_timer(watcher_scenario.atomic_actions(), - "watcher.delete_audit_template") - - def test_create_audit(self): - mock_audit_template = mock.Mock() - watcher_scenario = utils.WatcherScenario(self.context) - audit = watcher_scenario._create_audit(mock_audit_template) - self.mock_wait_for_status.mock.assert_called_once_with( - audit, - ready_statuses=["SUCCEEDED"], - failure_statuses=["FAILED"], - status_attr="state", - update_resource=self.mock_get_from_manager.mock.return_value, - check_interval=CONF.openstack.watcher_audit_launch_poll_interval, - timeout=CONF.openstack.watcher_audit_launch_timeout, - id_attr="uuid") - self.mock_get_from_manager.mock.assert_called_once_with() - self.admin_clients("watcher").audit.create.assert_called_once_with( - audit_template_uuid=mock_audit_template, audit_type="ONESHOT") - self._test_atomic_action_timer(watcher_scenario.atomic_actions(), - "watcher.create_audit") - - def test_delete_audit(self): - mock_audit = mock.Mock() - watcher_scenario = utils.WatcherScenario(self.context) - watcher_scenario._delete_audit(mock_audit) - self.admin_clients("watcher").audit.delete.assert_called_once_with( - mock_audit.uuid) - self._test_atomic_action_timer(watcher_scenario.atomic_actions(), - "watcher.delete_audit") diff --git a/tests/unit/plugins/openstack/scenarios/zaqar/__init__.py b/tests/unit/plugins/openstack/scenarios/zaqar/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git 
a/tests/unit/plugins/openstack/scenarios/zaqar/test_basic.py b/tests/unit/plugins/openstack/scenarios/zaqar/test_basic.py deleted file mode 100644 index db4a0a80f8..0000000000 --- a/tests/unit/plugins/openstack/scenarios/zaqar/test_basic.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (c) 2014 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.plugins.openstack.scenarios.zaqar import basic -from tests.unit import test - -BASE = "rally.plugins.openstack.scenarios.zaqar.basic" - - -class ZaqarBasicTestCase(test.ScenarioTestCase): - - @mock.patch("%s.CreateQueue.generate_random_name" % BASE, - return_value="fizbit") - def test_create_queue(self, mock_random_name): - scenario = basic.CreateQueue(self.context) - scenario._queue_create = mock.MagicMock() - scenario.run(fakearg="fake") - scenario._queue_create.assert_called_once_with(fakearg="fake") - - @mock.patch("%s.CreateQueue.generate_random_name" % BASE, - return_value="kitkat") - def test_producer_consumer(self, mock_random_name): - scenario = basic.ProducerConsumer(self.context) - messages = [{"body": {"id": idx}, "ttl": 360} for idx - in range(20)] - queue = mock.MagicMock() - - scenario._queue_create = mock.MagicMock(return_value=queue) - scenario._messages_post = mock.MagicMock() - scenario._messages_list = mock.MagicMock() - scenario._queue_delete = mock.MagicMock() - - scenario.run(min_msg_count=20, max_msg_count=20, fakearg="fake") - - scenario._queue_create.assert_called_once_with(fakearg="fake") - scenario._messages_post.assert_called_once_with(queue, messages, - 20, 20) - scenario._messages_list.assert_called_once_with(queue) - scenario._queue_delete.assert_called_once_with(queue) diff --git a/tests/unit/plugins/openstack/scenarios/zaqar/test_utils.py b/tests/unit/plugins/openstack/scenarios/zaqar/test_utils.py deleted file mode 100644 index c762c1562d..0000000000 --- a/tests/unit/plugins/openstack/scenarios/zaqar/test_utils.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright (c) 2014 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.plugins.openstack.scenarios.zaqar import utils -from tests.unit import fakes -from tests.unit import test - -UTILS = "rally.plugins.openstack.scenarios.zaqar.utils." 
- - -class ZaqarScenarioTestCase(test.ScenarioTestCase): - - @mock.patch(UTILS + "ZaqarScenario.generate_random_name", - return_value="kitkat") - def test_queue_create(self, mock_generate_random_name): - scenario = utils.ZaqarScenario(self.context) - result = scenario._queue_create(fakearg="fakearg") - - self.assertEqual(self.clients("zaqar").queue.return_value, result) - self.clients("zaqar").queue.assert_called_once_with("kitkat", - fakearg="fakearg") - self._test_atomic_action_timer(scenario.atomic_actions(), - "zaqar.create_queue") - - def test_queue_delete(self): - queue = fakes.FakeQueue() - queue.delete = mock.MagicMock() - - scenario = utils.ZaqarScenario(context=self.context) - scenario._queue_delete(queue) - queue.delete.assert_called_once_with() - self._test_atomic_action_timer(scenario.atomic_actions(), - "zaqar.delete_queue") - - def test_messages_post(self): - queue = fakes.FakeQueue() - queue.post = mock.MagicMock() - - messages = [{"body": {"id": "one"}, "ttl": 100}, - {"body": {"id": "two"}, "ttl": 120}, - {"body": {"id": "three"}, "ttl": 140}] - min_msg_count = max_msg_count = len(messages) - - scenario = utils.ZaqarScenario(context=self.context) - scenario._messages_post(queue, messages, min_msg_count, max_msg_count) - queue.post.assert_called_once_with(messages) - - def test_messages_list(self): - queue = fakes.FakeQueue() - queue.messages = mock.MagicMock() - - scenario = utils.ZaqarScenario(context=self.context) - scenario._messages_list(queue) - queue.messages.assert_called_once_with() - self._test_atomic_action_timer(scenario.atomic_actions(), - "zaqar.list_messages") diff --git a/tests/unit/plugins/openstack/services/__init__.py b/tests/unit/plugins/openstack/services/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/services/gnocchi/__init__.py b/tests/unit/plugins/openstack/services/gnocchi/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/services/gnocchi/test_metric.py b/tests/unit/plugins/openstack/services/gnocchi/test_metric.py deleted file mode 100644 index 297e888b5b..0000000000 --- a/tests/unit/plugins/openstack/services/gnocchi/test_metric.py +++ /dev/null @@ -1,217 +0,0 @@ -# Copyright 2017 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from rally.plugins.openstack.services.gnocchi import metric -from tests.unit import test - - -class GnocchiServiceTestCase(test.TestCase): - def setUp(self): - super(GnocchiServiceTestCase, self).setUp() - self.clients = mock.MagicMock() - self.name_generator = mock.MagicMock() - self.service = metric.GnocchiService( - self.clients, - name_generator=self.name_generator) - - def atomic_actions(self): - return self.service._atomic_actions - - def test__create_archive_policy(self): - definition = [{"granularity": "0:00:01", "timespan": "1:00:00"}] - aggregation_methods = [ - "std", "count", "95pct", "min", "max", "sum", "median", "mean"] - archive_policy = {"name": "fake_name"} - archive_policy["definition"] = definition - archive_policy["aggregation_methods"] = aggregation_methods - - self.assertEqual( - self.service.create_archive_policy( - name="fake_name", - definition=definition, - aggregation_methods=aggregation_methods), - self.service._clients.gnocchi().archive_policy.create( - archive_policy) - ) - self._test_atomic_action_timer(self.atomic_actions(), - "gnocchi.create_archive_policy") - - def test__delete_archive_policy(self): - self.service.delete_archive_policy("fake_name") - self.service._clients.gnocchi().archive_policy.delete \ - .assert_called_once_with("fake_name") - self._test_atomic_action_timer(self.atomic_actions(), - "gnocchi.delete_archive_policy") - - def test__list_archive_policy(self): - self.assertEqual( - self.service.list_archive_policy(), - self.service._clients.gnocchi().archive_policy.list.return_value - ) - self._test_atomic_action_timer(self.atomic_actions(), - "gnocchi.list_archive_policy") - - def test__create_archive_policy_rule(self): - archive_policy_rule = {"name": "fake_name"} - archive_policy_rule["metric_pattern"] = "cpu_*" - archive_policy_rule["archive_policy_name"] = "low" - - self.assertEqual( - self.service.create_archive_policy_rule( - name="fake_name", - metric_pattern="cpu_*", - archive_policy_name="low"), - self.service._clients.gnocchi().archive_policy_rule.create( - archive_policy_rule) - ) - self._test_atomic_action_timer(self.atomic_actions(), - "gnocchi.create_archive_policy_rule") - - def test__delete_archive_policy_rule(self): - self.service.delete_archive_policy_rule("fake_name") - self.service._clients.gnocchi().archive_policy_rule \ - .delete.assert_called_once_with("fake_name") - self._test_atomic_action_timer(self.atomic_actions(), - "gnocchi.delete_archive_policy_rule") - - def test__list_archive_policy_rule(self): - self.assertEqual( - self.service.list_archive_policy_rule(), - self.service._clients.gnocchi().archive_policy_rule.list - .return_value - ) - self._test_atomic_action_timer(self.atomic_actions(), - "gnocchi.list_archive_policy_rule") - - def test__list_capabilities(self): - self.assertEqual( - self.service.list_capabilities(), - self.service._clients.gnocchi().capabilities.list.return_value - ) - self._test_atomic_action_timer(self.atomic_actions(), - "gnocchi.list_capabilities") - - def test__get_measures_aggregation(self): - self.assertEqual( - self.service.get_measures_aggregation( - metrics=[1], - aggregation="mean", - refresh=False), - self.service._clients.gnocchi().metric.aggregation( - [1], "mean", False) - ) - self._test_atomic_action_timer(self.atomic_actions(), - "gnocchi.get_measures_aggregation") - - def test__get_measures(self): - self.assertEqual( - self.service.get_measures( - metric=1, - aggregation="mean", - refresh=False), - self.service._clients.gnocchi().metric.get_measures( - 
1, "mean", False) - ) - self._test_atomic_action_timer(self.atomic_actions(), - "gnocchi.get_measures") - - def test__create_metric(self): - metric = {"name": "fake_name"} - metric["archive_policy_name"] = "fake_archive_policy" - metric["unit"] = "fake_unit" - metric["resource_id"] = "fake_resource_id" - self.assertEqual( - self.service.create_metric( - name="fake_name", - archive_policy_name="fake_archive_policy", - unit="fake_unit", - resource_id="fake_resource_id"), - self.service._clients.gnocchi().metric.create(metric) - ) - self._test_atomic_action_timer(self.atomic_actions(), - "gnocchi.create_metric") - - def test__delete_metric(self): - self.service.delete_metric("fake_metric_id") - self.service._clients.gnocchi().metric.delete.assert_called_once_with( - "fake_metric_id") - self._test_atomic_action_timer(self.atomic_actions(), - "gnocchi.delete_metric") - - def test__list_metric(self): - self.assertEqual( - self.service.list_metric(), - self.service._clients.gnocchi().metric.list.return_value - ) - self._test_atomic_action_timer(self.atomic_actions(), - "gnocchi.list_metric") - - def test__create_resource(self): - resource = {"id": "11111"} - self.assertEqual( - self.service.create_resource("fake_type"), - self.service._clients.gnocchi().resource.create( - "fake_type", resource) - ) - self._test_atomic_action_timer(self.atomic_actions(), - "gnocchi.create_resource") - - def test__delete_resource(self): - self.service.delete_resource("fake_resource_id") - self.service._clients.gnocchi().resource.delete \ - .assert_called_once_with("fake_resource_id") - self._test_atomic_action_timer(self.atomic_actions(), - "gnocchi.delete_resource") - - def test__list_resource(self): - self.assertEqual( - self.service.list_resource(), - self.service._clients.gnocchi().resource.list.return_value - ) - self._test_atomic_action_timer(self.atomic_actions(), - "gnocchi.list_resource") - - def test__create_resource_type(self): - resource_type = {"name": "fake_name"} - self.assertEqual( - self.service.create_resource_type("fake_name"), - self.service._clients.gnocchi().resource_type.create(resource_type) - ) - self._test_atomic_action_timer(self.atomic_actions(), - "gnocchi.create_resource_type") - - def test__delete_resource_type(self): - self.service.delete_resource_type("fake_resource_name") - self.service._clients.gnocchi().resource_type.delete \ - .assert_called_once_with("fake_resource_name") - self._test_atomic_action_timer(self.atomic_actions(), - "gnocchi.delete_resource_type") - - def test__list_resource_type(self): - self.assertEqual( - self.service.list_resource_type(), - self.service._clients.gnocchi().resource_type.list.return_value - ) - self._test_atomic_action_timer(self.atomic_actions(), - "gnocchi.list_resource_type") - - def test__get_status(self,): - self.assertEqual( - self.service.get_status(), - self.service._clients.gnocchi().status.get.return_value - ) - self._test_atomic_action_timer(self.atomic_actions(), - "gnocchi.get_status") diff --git a/tests/unit/plugins/openstack/services/heat/__init__.py b/tests/unit/plugins/openstack/services/heat/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/services/heat/test_main.py b/tests/unit/plugins/openstack/services/heat/test_main.py deleted file mode 100644 index 5c00255612..0000000000 --- a/tests/unit/plugins/openstack/services/heat/test_main.py +++ /dev/null @@ -1,106 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file 
except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.plugins.openstack.services.heat import main -from tests.unit import test - - -class Stack(main.Stack): - def __init__(self): - self.scenario = mock.Mock() - - -class StackTestCase(test.ScenarioTestCase): - - @mock.patch("rally.plugins.openstack.services.heat.main.open", - create=True) - def test___init__(self, mock_open): - reads = [mock.Mock(), mock.Mock()] - reads[0].read.return_value = "template_contents" - reads[1].read.return_value = "file1_contents" - mock_open.side_effect = reads - stack = main.Stack("scenario", "task", "template", - parameters="parameters", - files={"f1_name": "f1_path"}) - self.assertEqual("template_contents", stack.template) - self.assertEqual({"f1_name": "file1_contents"}, stack.files) - self.assertEqual([mock.call("template"), mock.call("f1_path")], - mock_open.mock_calls) - reads[0].read.assert_called_once_with() - reads[1].read.assert_called_once_with() - - @mock.patch("rally.plugins.openstack.services.heat.main.utils") - def test__wait(self, mock_utils): - fake_stack = mock.Mock() - stack = Stack() - stack.stack = fake_stack = mock.Mock() - stack._wait(["ready_statuses"], ["failure_statuses"]) - mock_utils.wait_for_status.assert_called_once_with( - fake_stack, check_interval=1.0, - ready_statuses=["ready_statuses"], - failure_statuses=["failure_statuses"], - timeout=3600.0, - update_resource=mock_utils.get_from_manager()) - - @mock.patch("rally.task.atomic") - @mock.patch("rally.plugins.openstack.services.heat.main.open") - @mock.patch("rally.plugins.openstack.services.heat.main.Stack._wait") - def test_create(self, mock_stack__wait, mock_open, mock_task_atomic): - mock_scenario = mock.MagicMock(_atomic_actions=[]) - mock_scenario.generate_random_name.return_value = "fake_name" - mock_open().read.return_value = "fake_content" - mock_new_stack = { - "stack": { - "id": "fake_id" - } - } - mock_scenario.clients("heat").stacks.create.return_value = ( - mock_new_stack) - - stack = main.Stack( - scenario=mock_scenario, task=mock.Mock(), - template=mock.Mock(), files={} - ) - stack.create() - mock_scenario.clients("heat").stacks.create.assert_called_once_with( - files={}, parameters=None, stack_name="fake_name", - template="fake_content" - ) - mock_scenario.clients("heat").stacks.get.assert_called_once_with( - "fake_id") - mock_stack__wait.assert_called_once_with(["CREATE_COMPLETE"], - ["CREATE_FAILED"]) - - @mock.patch("rally.task.atomic") - @mock.patch("rally.plugins.openstack.services.heat.main.open") - @mock.patch("rally.plugins.openstack.services.heat.main.Stack._wait") - def test_update(self, mock_stack__wait, mock_open, mock_task_atomic): - mock_scenario = mock.MagicMock( - stack_id="fake_id", _atomic_actions=[]) - mock_parameters = mock.Mock() - mock_open().read.return_value = "fake_content" - stack = main.Stack( - scenario=mock_scenario, task=mock.Mock(), - template=None, files={}, parameters=mock_parameters - ) - stack.stack_id = "fake_id" - stack.parameters = mock_parameters - stack.update({"foo": "bar"}) - mock_scenario.clients("heat").stacks.update.assert_called_once_with( - 
"fake_id", files={}, template="fake_content", - parameters=mock_parameters - ) - mock_stack__wait.assert_called_once_with(["UPDATE_COMPLETE"], - ["UPDATE_FAILED"]) diff --git a/tests/unit/plugins/openstack/services/identity/__init__.py b/tests/unit/plugins/openstack/services/identity/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/services/identity/test_identity.py b/tests/unit/plugins/openstack/services/identity/test_identity.py deleted file mode 100644 index 86fac12872..0000000000 --- a/tests/unit/plugins/openstack/services/identity/test_identity.py +++ /dev/null @@ -1,257 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import mock - -from rally.plugins.openstack.services.identity import identity -from tests.unit import test - - -@ddt.ddt -class IdentityTestCase(test.TestCase): - def setUp(self): - super(IdentityTestCase, self).setUp() - self.clients = mock.MagicMock() - - def get_service_with_fake_impl(self): - path = "rally.plugins.openstack.services.identity.identity" - with mock.patch("%s.Identity.discover_impl" % path) as mock_discover: - mock_discover.return_value = mock.MagicMock(), None - service = identity.Identity(self.clients) - return service - - def test_create_project(self): - service = self.get_service_with_fake_impl() - project_name = "name" - domain_name = "domain" - service.create_project(project_name, domain_name=domain_name) - service._impl.create_project.assert_called_once_with( - project_name, domain_name=domain_name) - - def test_update_project(self): - service = self.get_service_with_fake_impl() - - project_id = "id" - project_name = "name" - description = "descr" - enabled = False - service.update_project(project_id=project_id, name=project_name, - description=description, enabled=enabled) - service._impl.update_project.assert_called_once_with( - project_id, name=project_name, description=description, - enabled=enabled) - - def test_delete_project(self): - service = self.get_service_with_fake_impl() - project = "id" - service.delete_project(project) - service._impl.delete_project.assert_called_once_with(project) - - def test_list_projects(self): - service = self.get_service_with_fake_impl() - service.list_projects() - service._impl.list_projects.assert_called_once_with() - - def test_get_project(self): - service = self.get_service_with_fake_impl() - project = "id" - service.get_project(project) - service._impl.get_project.assert_called_once_with(project) - - def test_create_user(self): - service = self.get_service_with_fake_impl() - - username = "username" - password = "password" - project_id = "project_id" - domain_name = "domain_name" - - service.create_user(username=username, password=password, - project_id=project_id, domain_name=domain_name) - service._impl.create_user.assert_called_once_with( - username=username, password=password, project_id=project_id, - domain_name=domain_name, default_role="member") - - def test_create_users(self): - service = 
self.get_service_with_fake_impl() - - project_id = "project_id" - n = 3 - user_create_args = {} - - service.create_users(project_id, number_of_users=n, - user_create_args=user_create_args) - service._impl.create_users.assert_called_once_with( - project_id, number_of_users=n, user_create_args=user_create_args) - - def test_delete_user(self): - service = self.get_service_with_fake_impl() - user_id = "fake_id" - service.delete_user(user_id) - service._impl.delete_user.assert_called_once_with(user_id) - - def test_list_users(self): - service = self.get_service_with_fake_impl() - service.list_users() - service._impl.list_users.assert_called_once_with() - - def test_update_user(self): - service = self.get_service_with_fake_impl() - - user_id = "id" - user_name = "name" - email = "mail" - password = "pass" - enabled = False - service.update_user(user_id, name=user_name, password=password, - email=email, enabled=enabled) - service._impl.update_user.assert_called_once_with( - user_id, name=user_name, password=password, email=email, - enabled=enabled) - - def test_get_user(self): - service = self.get_service_with_fake_impl() - user = "id" - service.get_user(user) - service._impl.get_user.assert_called_once_with(user) - - def test_create_service(self): - service = self.get_service_with_fake_impl() - - service_name = "name" - service_type = "service_type" - description = "descr" - service.create_service(service_name, service_type=service_type, - description=description) - service._impl.create_service.assert_called_once_with( - name=service_name, service_type=service_type, - description=description) - - def test_delete_service(self): - service = self.get_service_with_fake_impl() - service_id = "id" - - service.delete_service(service_id) - service._impl.delete_service.assert_called_once_with(service_id) - - def test_list_services(self): - service = self.get_service_with_fake_impl() - service.list_services() - service._impl.list_services.assert_called_once_with() - - def test_get_service(self): - service = self.get_service_with_fake_impl() - service_id = "id" - service.get_service(service_id) - service._impl.get_service.assert_called_once_with(service_id) - - def test_get_service_by_name(self): - service = self.get_service_with_fake_impl() - service_name = "name" - service.get_service_by_name(service_name) - service._impl.get_service_by_name.assert_called_once_with(service_name) - - def test_create_role(self): - service = self.get_service_with_fake_impl() - - name = "name" - service.create_role(name) - service._impl.create_role.assert_called_once_with( - name=name, domain_name=None) - - def test_add_role(self): - service = self.get_service_with_fake_impl() - - role_id = "id" - user_id = "user_id" - project_id = "project_id" - service.add_role(role_id, user_id=user_id, project_id=project_id) - service._impl.add_role.assert_called_once_with(role_id=role_id, - user_id=user_id, - project_id=project_id) - - def test_delete_role(self): - service = self.get_service_with_fake_impl() - role = "id" - service.delete_role(role) - service._impl.delete_role.assert_called_once_with(role) - - def test_revoke_role(self): - service = self.get_service_with_fake_impl() - - role_id = "id" - user_id = "user_id" - project_id = "project_id" - - service.revoke_role(role_id, user_id=user_id, project_id=project_id) - - service._impl.revoke_role.assert_called_once_with( - role_id=role_id, user_id=user_id, project_id=project_id) - - @ddt.data((None, None, None), ("user_id", "project_id", "domain")) - def test_list_roles(self, 
params): - user, project, domain = params - service = self.get_service_with_fake_impl() - service.list_roles(user_id=user, project_id=project, - domain_name=domain) - service._impl.list_roles.assert_called_once_with(user_id=user, - project_id=project, - domain_name=domain) - - def test_get_role(self): - service = self.get_service_with_fake_impl() - role = "id" - service.get_role(role) - service._impl.get_role.assert_called_once_with(role) - - def test_create_ec2credentials(self): - service = self.get_service_with_fake_impl() - - user_id = "id" - project_id = "project-id" - - service.create_ec2credentials(user_id=user_id, project_id=project_id) - service._impl.create_ec2credentials.assert_called_once_with( - user_id=user_id, project_id=project_id) - - def test_list_ec2credentials(self): - service = self.get_service_with_fake_impl() - - user_id = "id" - - service.list_ec2credentials(user_id=user_id) - service._impl.list_ec2credentials.assert_called_once_with(user_id) - - def test_delete_ec2credential(self): - service = self.get_service_with_fake_impl() - - user_id = "id" - access = "access" - - service.delete_ec2credential(user_id=user_id, access=access) - service._impl.delete_ec2credential.assert_called_once_with( - user_id=user_id, access=access) - - def test_fetch_token(self): - service = self.get_service_with_fake_impl() - service.fetch_token() - service._impl.fetch_token.assert_called_once_with() - - def test_validate_token(self): - service = self.get_service_with_fake_impl() - - token = "id" - service.validate_token(token) - service._impl.validate_token.assert_called_once_with(token) diff --git a/tests/unit/plugins/openstack/services/identity/test_keystone_common.py b/tests/unit/plugins/openstack/services/identity/test_keystone_common.py deleted file mode 100644 index dac79f8e9e..0000000000 --- a/tests/unit/plugins/openstack/services/identity/test_keystone_common.py +++ /dev/null @@ -1,280 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from rally.plugins.openstack import service -from rally.plugins.openstack.services.identity import identity -from rally.plugins.openstack.services.identity import keystone_common -from tests.unit import test - - -class FullUnifiedKeystone(keystone_common.UnifiedKeystoneMixin, - service.Service): - """Implementation of UnifiedKeystoneMixin with Service base class.""" - pass - - -class UnifiedKeystoneMixinTestCase(test.TestCase): - def setUp(self): - super(UnifiedKeystoneMixinTestCase, self).setUp() - self.clients = mock.MagicMock() - self.name_generator = mock.MagicMock() - self.impl = mock.MagicMock() - self.version = "some" - self.service = FullUnifiedKeystone( - clients=self.clients, name_generator=self.name_generator) - self.service._impl = self.impl - self.service.version = self.version - - def test__unify_service(self): - class SomeFakeService(object): - id = 123123123123123 - name = "asdfasdfasdfasdfadf" - other_var = "asdfasdfasdfasdfasdfasdfasdf" - - service = self.service._unify_service(SomeFakeService()) - self.assertIsInstance(service, identity.Service) - self.assertEqual(SomeFakeService.id, service.id) - self.assertEqual(SomeFakeService.name, service.name) - - def test__unify_role(self): - class SomeFakeRole(object): - id = 123123123123123 - name = "asdfasdfasdfasdfadf" - other_var = "asdfasdfasdfasdfasdfasdfasdf" - - role = self.service._unify_role(SomeFakeRole()) - self.assertIsInstance(role, identity.Role) - self.assertEqual(SomeFakeRole.id, role.id) - self.assertEqual(SomeFakeRole.name, role.name) - - def test_delete_user(self): - user_id = "id" - - self.service.delete_user(user_id) - self.impl.delete_user.assert_called_once_with(user_id) - - def test_get_user(self): - user_id = "id" - - self.service._unify_user = mock.MagicMock() - - self.assertEqual(self.service._unify_user.return_value, - self.service.get_user(user_id)) - - self.impl.get_user.assert_called_once_with(user_id) - self.service._unify_user.assert_called_once_with( - self.impl.get_user.return_value) - - def test_create_service(self): - self.service._unify_service = mock.MagicMock() - - name = "some_Service" - service_type = "computeNextGen" - description = "we will Rock you!" 
- - self.assertEqual(self.service._unify_service.return_value, - self.service.create_service( - name=name, service_type=service_type, - description=description)) - - self.service._unify_service.assert_called_once_with( - self.service._impl.create_service.return_value) - self.service._impl.create_service.assert_called_once_with( - name=name, service_type=service_type, description=description) - - def test_delete_service(self): - service_id = "id" - - self.service.delete_service(service_id) - self.impl.delete_service.assert_called_once_with(service_id) - - def test_get_service(self): - service_id = "id" - - self.service._unify_service = mock.MagicMock() - - self.assertEqual(self.service._unify_service.return_value, - self.service.get_service(service_id)) - - self.impl.get_service.assert_called_once_with(service_id) - self.service._unify_service.assert_called_once_with( - self.impl.get_service.return_value) - - def test_get_service_by_name(self): - service_id = "id" - - self.service._unify_service = mock.MagicMock() - - self.assertEqual(self.service._unify_service.return_value, - self.service.get_service_by_name(service_id)) - - self.impl.get_service_by_name.assert_called_once_with(service_id) - self.service._unify_service.assert_called_once_with( - self.impl.get_service_by_name.return_value) - - def test_delete_role(self): - role_id = "id" - - self.service.delete_role(role_id) - self.impl.delete_role.assert_called_once_with(role_id) - - def test_get_role(self): - role_id = "id" - - self.service._unify_role = mock.MagicMock() - - self.assertEqual(self.service._unify_role.return_value, - self.service.get_role(role_id)) - - self.impl.get_role.assert_called_once_with(role_id) - self.service._unify_role.assert_called_once_with( - self.impl.get_role.return_value) - - def test_list_ec2credentials(self): - user_id = "id" - self.assertEqual(self.impl.list_ec2credentials.return_value, - self.service.list_ec2credentials(user_id)) - - self.impl.list_ec2credentials.assert_called_once_with(user_id) - - def test_delete_ec2credential(self): - user_id = "id" - access = mock.MagicMock() - - self.assertEqual(self.impl.delete_ec2credential.return_value, - self.service.delete_ec2credential(user_id, - access=access)) - - self.impl.delete_ec2credential.assert_called_once_with(user_id=user_id, - access=access) - - def test_fetch_token(self): - - self.assertEqual(self.impl.fetch_token.return_value, - self.service.fetch_token()) - - self.impl.fetch_token.assert_called_once_with() - - def test_validate_token(self): - token = "id" - - self.assertEqual(self.impl.validate_token.return_value, - self.service.validate_token(token)) - - self.impl.validate_token.assert_called_once_with(token) - - -class FullKeystone(service.Service, keystone_common.KeystoneMixin): - """Implementation of KeystoneMixin with Service base class.""" - pass - - -class KeystoneMixinTestCase(test.TestCase): - def setUp(self): - super(KeystoneMixinTestCase, self).setUp() - self.clients = mock.MagicMock() - self.kc = self.clients.keystone.return_value - self.name_generator = mock.MagicMock() - self.version = "some" - self.service = FullKeystone( - clients=self.clients, name_generator=self.name_generator) - self.service.version = self.version - - def test_list_users(self): - self.assertEqual(self.kc.users.list.return_value, - self.service.list_users()) - self.kc.users.list.assert_called_once_with() - - def test_delete_user(self): - user_id = "fake_id" - self.service.delete_user(user_id) - self.kc.users.delete.assert_called_once_with(user_id) - - def 
test_get_user(self): - user_id = "fake_id" - self.service.get_user(user_id) - self.kc.users.get.assert_called_once_with(user_id) - - def test_delete_service(self): - service_id = "fake_id" - self.service.delete_service(service_id) - self.kc.services.delete.assert_called_once_with(service_id) - - def test_list_services(self): - self.assertEqual(self.kc.services.list.return_value, - self.service.list_services()) - self.kc.services.list.assert_called_once_with() - - def test_get_service(self): - service_id = "fake_id" - self.service.get_service(service_id) - self.kc.services.get.assert_called_once_with(service_id) - - def test_get_service_by_name(self): - class FakeService(object): - def __init__(self, name): - self.name = name - service_name = "fake_name" - services = [FakeService(name="foo"), FakeService(name=service_name), - FakeService(name="bar")] - self.service.list_services = mock.MagicMock(return_value=services) - - self.assertEqual(services[1], - self.service.get_service_by_name(service_name)) - - def test_delete_role(self): - role_id = "fake_id" - self.service.delete_role(role_id) - self.kc.roles.delete.assert_called_once_with(role_id) - - def test_list_roles(self): - self.assertEqual(self.kc.roles.list.return_value, - self.service.list_roles()) - self.kc.roles.list.assert_called_once_with() - - def test_get_role(self): - role_id = "fake_id" - self.service.get_role(role_id) - self.kc.roles.get.assert_called_once_with(role_id) - - def test_list_ec2credentials(self): - user_id = "fake_id" - - self.assertEqual(self.kc.ec2.list.return_value, - self.service.list_ec2credentials(user_id)) - self.kc.ec2.list.assert_called_once_with(user_id) - - def test_delete_ec2credentials(self): - user_id = "fake_id" - access = mock.MagicMock() - - self.service.delete_ec2credential(user_id, access=access) - self.kc.ec2.delete.assert_called_once_with(user_id=user_id, - access=access) - - @mock.patch("rally.plugins.openstack.osclients.Clients") - def test_fetch_token(self, mock_clients): - expected_token = mock_clients.return_value.keystone.auth_ref.auth_token - self.assertEqual(expected_token, self.service.fetch_token()) - mock_clients.assert_called_once_with( - credential=self.clients.credential, - api_info=self.clients.api_info) - - def test_validate_token(self): - token = "some_token" - - self.service.validate_token(token) - self.kc.tokens.validate.assert_called_once_with(token) diff --git a/tests/unit/plugins/openstack/services/identity/test_keystone_v2.py b/tests/unit/plugins/openstack/services/identity/test_keystone_v2.py deleted file mode 100644 index 6d1a118f28..0000000000 --- a/tests/unit/plugins/openstack/services/identity/test_keystone_v2.py +++ /dev/null @@ -1,477 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import uuid - -import ddt -import mock - -from rally.plugins.openstack.services.identity import identity -from rally.plugins.openstack.services.identity import keystone_v2 -from tests.unit import test - - -PATH = "rally.plugins.openstack.services.identity.keystone_v2" - - -@ddt.ddt -class KeystoneV2ServiceTestCase(test.TestCase): - def setUp(self): - super(KeystoneV2ServiceTestCase, self).setUp() - self.clients = mock.MagicMock() - self.kc = self.clients.keystone.return_value - self.name_generator = mock.MagicMock() - self.service = keystone_v2.KeystoneV2Service( - self.clients, name_generator=self.name_generator) - - def test_create_tenant(self): - name = "name" - tenant = self.service.create_tenant(name) - - self.assertEqual(tenant, self.kc.tenants.create.return_value) - self.kc.tenants.create.assert_called_once_with(name) - - @ddt.data({"tenant_id": "fake_id", "name": True, "enabled": True, - "description": True}, - {"tenant_id": "fake_id", "name": "some", "enabled": False, - "description": "descr"}) - @ddt.unpack - def test_update_tenant(self, tenant_id, name, enabled, description): - - self.name_generator.side_effect = ("foo", "bar") - self.service.update_tenant(tenant_id, - name=name, - description=description, - enabled=enabled) - - name = "foo" if name is True else name - description = "bar" if description is True else description - - self.kc.tenants.update.assert_called_once_with( - tenant_id, name=name, description=description, enabled=enabled) - - def test_delete_tenant(self): - tenant_id = "fake_id" - self.service.delete_tenant(tenant_id) - self.kc.tenants.delete.assert_called_once_with(tenant_id) - - def test_list_tenants(self): - self.assertEqual(self.kc.tenants.list.return_value, - self.service.list_tenants()) - self.kc.tenants.list.assert_called_once_with() - - def test_get_tenant(self): - tenant_id = "fake_id" - self.service.get_tenant(tenant_id) - self.kc.tenants.get.assert_called_once_with(tenant_id) - - def test_create_user(self): - name = "name" - password = "passwd" - email = "rally@example.com" - tenant_id = "project" - - user = self.service.create_user(name, password=password, email=email, - tenant_id=tenant_id) - - self.assertEqual(user, self.kc.users.create.return_value) - self.kc.users.create.assert_called_once_with( - name=name, password=password, email=email, tenant_id=tenant_id, - enabled=True) - - def test_create_users(self): - self.service.create_user = mock.MagicMock() - - n = 2 - tenant_id = "some" - self.assertEqual([self.service.create_user.return_value] * n, - self.service.create_users(number_of_users=n, - tenant_id=tenant_id)) - self.assertEqual([mock.call(tenant_id=tenant_id)] * n, - self.service.create_user.call_args_list) - - def test_update_user_with_wrong_params(self): - user_id = "fake_id" - card_with_cvv2 = "1234 5678 9000 0000 : 666" - self.assertRaises(NotImplementedError, self.service.update_user, - user_id, card_with_cvv2=card_with_cvv2) - - def test_update_user(self): - user_id = "fake_id" - name = "new name" - email = "new.name2016@example.com" - enabled = True - self.service.update_user(user_id, name=name, email=email, - enabled=enabled) - self.kc.users.update.assert_called_once_with( - user_id, name=name, email=email, enabled=enabled) - - def test_update_user_password(self): - user_id = "fake_id" - password = "qwerty123" - self.service.update_user_password(user_id, password=password) - self.kc.users.update_password.assert_called_once_with( - user_id, password=password) - - @ddt.data({"name": None, "service_type": None, "description": 
None}, - {"name": "some", "service_type": "st", "description": "d"}) - @ddt.unpack - def test_create_service(self, name, service_type, description): - self.assertEqual(self.kc.services.create.return_value, - self.service.create_service(name=name, - service_type=service_type, - description=description)) - name = name or self.name_generator.return_value - service_type = service_type or "rally_test_type" - description = description or self.name_generator.return_value - self.kc.services.create.assert_called_once_with( - name, service_type=service_type, description=description) - - def test_create_role(self): - name = "some" - self.service.create_role(name) - self.kc.roles.create.assert_called_once_with(name) - - def test_add_role(self): - role_id = "fake_id" - user_id = "user_id" - tenant_id = "tenant_id" - - self.service.add_role(role_id, user_id=user_id, tenant_id=tenant_id) - self.kc.roles.add_user_role.assert_called_once_with( - user=user_id, role=role_id, tenant=tenant_id) - - def test_list_roles(self): - self.assertEqual(self.kc.roles.list.return_value, - self.service.list_roles()) - self.kc.roles.list.assert_called_once_with() - - def test_list_roles_for_user(self): - user_id = "user_id" - tenant_id = "tenant_id" - self.assertEqual(self.kc.roles.roles_for_user.return_value, - self.service.list_roles_for_user(user_id, - tenant_id=tenant_id)) - self.kc.roles.roles_for_user.assert_called_once_with(user_id, - tenant_id) - - def test_revoke_role(self): - role_id = "fake_id" - user_id = "user_id" - tenant_id = "tenant_id" - - self.service.revoke_role(role_id, user_id=user_id, - tenant_id=tenant_id) - - self.kc.roles.remove_user_role.assert_called_once_with( - user=user_id, role=role_id, tenant=tenant_id) - - def test_create_ec2credentials(self): - user_id = "fake_id" - tenant_id = "fake_id" - - self.assertEqual(self.kc.ec2.create.return_value, - self.service.create_ec2credentials( - user_id, tenant_id=tenant_id)) - self.kc.ec2.create.assert_called_once_with(user_id, - tenant_id=tenant_id) - - -@ddt.ddt -class UnifiedKeystoneV2ServiceTestCase(test.TestCase): - def setUp(self): - super(UnifiedKeystoneV2ServiceTestCase, self).setUp() - self.clients = mock.MagicMock() - self.service = keystone_v2.UnifiedKeystoneV2Service(self.clients) - self.service._impl = mock.MagicMock() - - def test_init_identity_service(self): - self.clients.keystone.return_value.version = "v2.0" - self.assertIsInstance(identity.Identity(self.clients)._impl, - keystone_v2.UnifiedKeystoneV2Service) - - def test__check_domain(self): - self.service._check_domain("Default") - self.service._check_domain("default") - self.assertRaises(NotImplementedError, self.service._check_domain, - "non-default") - - def test__unify_tenant(self): - class KeystoneV2Tenant(object): - def __init__(self, domain_id="domain_id"): - self.id = str(uuid.uuid4()) - self.name = str(uuid.uuid4()) - self.domain_id = domain_id - - tenant = KeystoneV2Tenant() - project = self.service._unify_tenant(tenant) - self.assertIsInstance(project, identity.Project) - self.assertEqual(tenant.id, project.id) - self.assertEqual(tenant.name, project.name) - self.assertEqual("default", project.domain_id) - self.assertNotEqual(tenant.domain_id, project.domain_id) - - def test__unify_user(self): - class KeystoneV2User(object): - def __init__(self, tenantId=None): - self.id = str(uuid.uuid4()) - self.name = str(uuid.uuid4()) - if tenantId is not None: - self.tenantId = tenantId - - user = KeystoneV2User() - - unified_user = self.service._unify_user(user) - 
self.assertIsInstance(unified_user, identity.User) - self.assertEqual(user.id, unified_user.id) - self.assertEqual(user.name, unified_user.name) - self.assertEqual("default", unified_user.domain_id) - self.assertIsNone(unified_user.project_id) - - tenant_id = "tenant_id" - user = KeystoneV2User(tenantId=tenant_id) - unified_user = self.service._unify_user(user) - self.assertIsInstance(unified_user, identity.User) - self.assertEqual(user.id, unified_user.id) - self.assertEqual(user.name, unified_user.name) - self.assertEqual("default", unified_user.domain_id) - self.assertEqual(tenant_id, unified_user.project_id) - - @mock.patch("%s.UnifiedKeystoneV2Service._check_domain" % PATH) - @mock.patch("%s.UnifiedKeystoneV2Service._unify_tenant" % PATH) - def test_create_project( - self, mock_unified_keystone_v2_service__unify_tenant, - mock_unified_keystone_v2_service__check_domain): - mock_unify_tenant = mock_unified_keystone_v2_service__unify_tenant - mock_check_domain = mock_unified_keystone_v2_service__check_domain - name = "name" - - self.assertEqual(mock_unify_tenant.return_value, - self.service.create_project(name)) - mock_check_domain.assert_called_once_with("Default") - mock_unify_tenant.assert_called_once_with( - self.service._impl.create_tenant.return_value) - self.service._impl.create_tenant.assert_called_once_with(name) - - def test_update_project(self): - tenant_id = "fake_id" - name = "name" - description = "descr" - enabled = False - - self.service.update_project(project_id=tenant_id, name=name, - description=description, enabled=enabled) - self.service._impl.update_tenant.assert_called_once_with( - tenant_id=tenant_id, name=name, description=description, - enabled=enabled) - - def test_delete_project(self): - tenant_id = "fake_id" - self.service.delete_project(tenant_id) - self.service._impl.delete_tenant.assert_called_once_with(tenant_id) - - @mock.patch("%s.UnifiedKeystoneV2Service._unify_tenant" % PATH) - def test_get_project(self, - mock_unified_keystone_v2_service__unify_tenant): - mock_unify_tenant = mock_unified_keystone_v2_service__unify_tenant - tenant_id = "id" - - self.assertEqual(mock_unify_tenant.return_value, - self.service.get_project(tenant_id)) - mock_unify_tenant.assert_called_once_with( - self.service._impl.get_tenant.return_value) - self.service._impl.get_tenant.assert_called_once_with(tenant_id) - - @mock.patch("%s.UnifiedKeystoneV2Service._unify_tenant" % PATH) - def test_list_projects(self, - mock_unified_keystone_v2_service__unify_tenant): - mock_unify_tenant = mock_unified_keystone_v2_service__unify_tenant - - tenants = [mock.MagicMock()] - self.service._impl.list_tenants.return_value = tenants - - self.assertEqual([mock_unify_tenant.return_value], - self.service.list_projects()) - mock_unify_tenant.assert_called_once_with(tenants[0]) - - @mock.patch("%s.UnifiedKeystoneV2Service._check_domain" % PATH) - @mock.patch("%s.UnifiedKeystoneV2Service._unify_user" % PATH) - def test_create_user(self, mock_unified_keystone_v2_service__unify_user, - mock_unified_keystone_v2_service__check_domain): - mock_check_domain = mock_unified_keystone_v2_service__check_domain - mock_unify_user = mock_unified_keystone_v2_service__unify_user - - name = "name" - password = "passwd" - tenant_id = "project" - - self.assertEqual(mock_unify_user.return_value, - self.service.create_user(name, password=password, - project_id=tenant_id)) - mock_check_domain.assert_called_once_with("Default") - mock_unify_user.assert_called_once_with( - self.service._impl.create_user.return_value) - 
self.service._impl.create_user.assert_called_once_with( - username=name, password=password, tenant_id=tenant_id, - enabled=True) - - @mock.patch("%s.UnifiedKeystoneV2Service._check_domain" % PATH) - @mock.patch("%s.UnifiedKeystoneV2Service._unify_user" % PATH) - def test_create_users(self, mock_unified_keystone_v2_service__unify_user, - mock_unified_keystone_v2_service__check_domain): - mock_check_domain = mock_unified_keystone_v2_service__check_domain - - tenant_id = "project" - n = 3 - domain_name = "Default" - - self.service.create_users( - tenant_id, number_of_users=3, - user_create_args={"domain_name": domain_name}) - mock_check_domain.assert_called_once_with(domain_name) - self.service._impl.create_users.assert_called_once_with( - tenant_id=tenant_id, number_of_users=n, - user_create_args={"domain_name": domain_name}) - - @mock.patch("%s.UnifiedKeystoneV2Service._unify_user" % PATH) - def test_list_users(self, mock_unified_keystone_v2_service__unify_user): - mock_unify_user = mock_unified_keystone_v2_service__unify_user - - users = [mock.MagicMock()] - self.service._impl.list_users.return_value = users - - self.assertEqual([mock_unify_user.return_value], - self.service.list_users()) - mock_unify_user.assert_called_once_with(users[0]) - - @ddt.data({"user_id": "id", "enabled": False, "name": "Fake", - "email": "badboy@example.com", "password": "pass"}, - {"user_id": "id", "enabled": None, "name": None, - "email": None, "password": None}) - @ddt.unpack - def test_update_user(self, user_id, enabled, name, email, password): - self.service.update_user(user_id, enabled=enabled, name=name, - email=email, password=password) - if password: - self.service._impl.update_user_password.assert_called_once_with( - user_id=user_id, password=password) - - args = {} - if enabled is not None: - args["enabled"] = enabled - if name is not None: - args["name"] = name - if email is not None: - args["email"] = email - - if args: - self.service._impl.update_user.assert_called_once_with( - user_id, **args) - - @mock.patch("%s.UnifiedKeystoneV2Service._unify_service" % PATH) - def test_list_services(self, - mock_unified_keystone_v2_service__unify_service): - mock_unify_service = mock_unified_keystone_v2_service__unify_service - - services = [mock.MagicMock()] - self.service._impl.list_services.return_value = services - - self.assertEqual([mock_unify_service.return_value], - self.service.list_services()) - mock_unify_service.assert_called_once_with(services[0]) - - @mock.patch("%s.UnifiedKeystoneV2Service._unify_role" % PATH) - def test_create_role(self, mock_unified_keystone_v2_service__unify_role): - mock_unify_role = mock_unified_keystone_v2_service__unify_role - name = "some" - - self.assertEqual(mock_unify_role.return_value, - self.service.create_role(name)) - - self.service._impl.create_role.assert_called_once_with(name) - mock_unify_role.assert_called_once_with( - self.service._impl.create_role.return_value) - - def test_add_role(self): - - role_id = "fake_id" - user_id = "user_id" - project_id = "user_id" - - self.service.add_role(role_id, user_id=user_id, - project_id=project_id) - - self.service._impl.add_role.assert_called_once_with( - user_id=user_id, role_id=role_id, tenant_id=project_id) - - def test_delete_role(self): - role_id = "fake_id" - self.service.delete_role(role_id) - self.service._impl.delete_role.assert_called_once_with(role_id) - - def test_revoke_role(self): - role_id = "fake_id" - user_id = "user_id" - project_id = "user_id" - - self.service.revoke_role(role_id, user_id=user_id, - 
project_id=project_id) - - self.service._impl.revoke_role.assert_called_once_with( - user_id=user_id, role_id=role_id, tenant_id=project_id) - - @mock.patch("%s.UnifiedKeystoneV2Service._unify_role" % PATH) - def test_list_roles(self, mock_unified_keystone_v2_service__unify_role): - mock_unify_role = mock_unified_keystone_v2_service__unify_role - - roles = [mock.MagicMock()] - another_roles = [mock.MagicMock()] - self.service._impl.list_roles.return_value = roles - self.service._impl.list_roles_for_user.return_value = another_roles - - # case 1 - self.assertEqual([mock_unify_role.return_value], - self.service.list_roles()) - self.service._impl.list_roles.assert_called_once_with() - mock_unify_role.assert_called_once_with(roles[0]) - self.assertFalse(self.service._impl.list_roles_for_user.called) - - self.service._impl.list_roles.reset_mock() - mock_unify_role.reset_mock() - - # case 2 - user = "user" - project = "project" - self.assertEqual([mock_unify_role.return_value], - self.service.list_roles(user_id=user, - project_id=project)) - self.service._impl.list_roles_for_user.assert_called_once_with( - user, tenant_id=project) - self.assertFalse(self.service._impl.list_roles.called) - mock_unify_role.assert_called_once_with(another_roles[0]) - - # case 3 - self.assertRaises(NotImplementedError, self.service.list_roles, - domain_name="some") - - def test_create_ec2credentials(self): - user_id = "id" - tenant_id = "tenant-id" - - self.assertEqual(self.service._impl.create_ec2credentials.return_value, - self.service.create_ec2credentials( - user_id=user_id, project_id=tenant_id)) - - self.service._impl.create_ec2credentials.assert_called_once_with( - user_id=user_id, tenant_id=tenant_id) diff --git a/tests/unit/plugins/openstack/services/identity/test_keystone_v3.py b/tests/unit/plugins/openstack/services/identity/test_keystone_v3.py deleted file mode 100644 index 800612ad99..0000000000 --- a/tests/unit/plugins/openstack/services/identity/test_keystone_v3.py +++ /dev/null @@ -1,579 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import uuid - -import ddt -import mock - -from rally import exceptions -from rally.plugins.openstack.services.identity import identity -from rally.plugins.openstack.services.identity import keystone_v3 -from tests.unit import test - - -PATH = "rally.plugins.openstack.services.identity.keystone_v3" - - -@ddt.ddt -class KeystoneV3ServiceTestCase(test.TestCase): - def setUp(self): - super(KeystoneV3ServiceTestCase, self).setUp() - self.clients = mock.MagicMock() - self.kc = self.clients.keystone.return_value - self.name_generator = mock.MagicMock() - self.service = keystone_v3.KeystoneV3Service( - self.clients, name_generator=self.name_generator) - - def test__get_domain_id_not_found(self): - from keystoneclient import exceptions as kc_exceptions - - self.kc.domains.get.side_effect = kc_exceptions.NotFound - self.kc.domains.list.return_value = [] - domain_name_or_id = "some" - - self.assertRaises(exceptions.GetResourceNotFound, - self.service._get_domain_id, domain_name_or_id) - self.kc.domains.get.assert_called_once_with(domain_name_or_id) - self.kc.domains.list.assert_called_once_with(name=domain_name_or_id) - - def test__get_domain_id_find_by_name(self): - from keystoneclient import exceptions as kc_exceptions - - self.kc.domains.get.side_effect = kc_exceptions.NotFound - domain = mock.MagicMock() - self.kc.domains.list.return_value = [domain] - domain_name_or_id = "some" - - self.assertEqual(domain.id, - self.service._get_domain_id(domain_name_or_id)) - self.kc.domains.get.assert_called_once_with(domain_name_or_id) - self.kc.domains.list.assert_called_once_with(name=domain_name_or_id) - - def test__get_domain_id_find_by_id(self): - domain = mock.MagicMock() - - self.kc.domains.get.return_value = domain - - domain_name_or_id = "some" - - self.assertEqual(domain.id, - self.service._get_domain_id(domain_name_or_id)) - self.kc.domains.get.assert_called_once_with(domain_name_or_id) - self.assertFalse(self.kc.domains.list.called) - - @mock.patch("%s.KeystoneV3Service._get_domain_id" % PATH) - def test_create_project(self, mock__get_domain_id): - name = "name" - domain_name = "domain" - domain_id = "id" - - mock__get_domain_id.return_value = domain_id - - project = self.service.create_project(name, domain_name=domain_name) - - mock__get_domain_id.assert_called_once_with(domain_name) - self.assertEqual(project, self.kc.projects.create.return_value) - self.kc.projects.create.assert_called_once_with(name=name, - domain=domain_id) - - @ddt.data({"project_id": "fake_id", "name": True, "enabled": True, - "description": True}, - {"project_id": "fake_id", "name": "some", "enabled": False, - "description": "descr"}) - @ddt.unpack - def test_update_project(self, project_id, name, enabled, description): - - self.service.update_project(project_id, - name=name, - description=description, - enabled=enabled) - - if name is True: - name = self.name_generator.return_value - if description is True: - description = self.name_generator.return_value - - self.kc.projects.update.assert_called_once_with( - project_id, name=name, description=description, enabled=enabled) - - def test_delete_project(self): - project_id = "fake_id" - self.service.delete_project(project_id) - self.kc.projects.delete.assert_called_once_with(project_id) - - def test_list_projects(self): - self.assertEqual(self.kc.projects.list.return_value, - self.service.list_projects()) - self.kc.projects.list.assert_called_once_with() - - def test_get_project(self): - project_id = "fake_id" - self.service.get_project(project_id) - 
self.kc.projects.get.assert_called_once_with(project_id) - - @mock.patch("%s.LOG" % PATH) - @mock.patch("%s.KeystoneV3Service._get_domain_id" % PATH) - def test_create_user(self, mock__get_domain_id, mock_log): - - name = "name" - password = "passwd" - project_id = "project" - domain_name = "domain" - - self.service.list_roles = mock.MagicMock(return_value=[]) - - user = self.service.create_user(name, password=password, - project_id=project_id, - domain_name=domain_name) - - self.assertEqual(user, self.kc.users.create.return_value) - self.kc.users.create.assert_called_once_with( - name=name, password=password, default_project=project_id, - domain=mock__get_domain_id.return_value, - enabled=True) - - self.assertTrue(mock_log.warning.called) - - @mock.patch("%s.LOG" % PATH) - @mock.patch("%s.KeystoneV3Service._get_domain_id" % PATH) - def test_create_user_without_project_id(self, mock__get_domain_id, - mock_log): - - name = "name" - password = "passwd" - domain_name = "domain" - - self.service.list_roles = mock.MagicMock(return_value=[]) - - user = self.service.create_user(name, password=password, - domain_name=domain_name) - - self.assertEqual(user, self.kc.users.create.return_value) - self.kc.users.create.assert_called_once_with( - name=name, password=password, default_project=None, - domain=mock__get_domain_id.return_value, - enabled=True) - - self.assertFalse(self.service.list_roles.called) - self.assertFalse(mock_log.warning.called) - - @mock.patch("%s.LOG" % PATH) - @mock.patch("%s.KeystoneV3Service._get_domain_id" % PATH) - def test_create_user_and_add_role( - self, mock_keystone_v3_service__get_domain_id, mock_log): - mock__get_domain_id = mock_keystone_v3_service__get_domain_id - - name = "name" - password = "passwd" - project_id = "project" - domain_name = "domain" - - class Role(object): - def __init__(self, name): - self.name = name - self.id = str(uuid.uuid4()) - - self.service.list_roles = mock.MagicMock( - return_value=[Role("admin"), Role("member")]) - self.service.add_role = mock.MagicMock() - - user = self.service.create_user(name, password=password, - project_id=project_id, - domain_name=domain_name) - - self.assertEqual(user, self.kc.users.create.return_value) - self.kc.users.create.assert_called_once_with( - name=name, password=password, default_project=project_id, - domain=mock__get_domain_id.return_value, - enabled=True) - - self.assertFalse(mock_log.warning.called) - self.service.add_role.assert_called_once_with( - role_id=self.service.list_roles.return_value[1].id, - user_id=user.id, - project_id=project_id) - - def test_create_users(self): - self.service.create_user = mock.MagicMock() - - n = 2 - project_id = "some" - self.assertEqual([self.service.create_user.return_value] * n, - self.service.create_users(number_of_users=n, - project_id=project_id)) - self.assertEqual([mock.call(project_id=project_id)] * n, - self.service.create_user.call_args_list) - - @ddt.data(None, "some") - def test_update_user(self, domain_name): - user_id = "fake_id" - name = "new name" - project_id = "new project" - password = "pass" - email = "mail" - description = "n/a" - enabled = False - default_project = "some" - - self.service._get_domain_id = mock.MagicMock() - - self.service.update_user(user_id, name=name, domain_name=domain_name, - project_id=project_id, password=password, - email=email, description=description, - enabled=enabled, - default_project=default_project) - - domain = None - if domain_name: - self.service._get_domain_id.assert_called_once_with(domain_name) - domain = 
self.service._get_domain_id.return_value - else: - self.assertFalse(self.service._get_domain_id.called) - - self.kc.users.update.assert_called_once_with( - user_id, name=name, domain=domain, project=project_id, - password=password, email=email, description=description, - enabled=enabled, default_project=default_project) - - @ddt.data({"name": None, "service_type": None, "description": None, - "enabled": True}, - {"name": "some", "service_type": "st", "description": "d", - "enabled": False}) - @ddt.unpack - def test_create_service(self, name, service_type, description, enabled): - self.assertEqual(self.kc.services.create.return_value, - self.service.create_service(name=name, - service_type=service_type, - description=description, - enabled=enabled)) - name = name or self.name_generator.return_value - service_type = service_type or "rally_test_type" - description = description or self.name_generator.return_value - self.kc.services.create.assert_called_once_with( - name, type=service_type, description=description, - enabled=enabled) - - @mock.patch("%s.KeystoneV3Service._get_domain_id" % PATH) - def test_create_role(self, mock__get_domain_id): - - domain_name = "domain" - name = "some" - - user = self.service.create_role(name, domain_name=domain_name) - - self.assertEqual(user, self.kc.roles.create.return_value) - self.kc.roles.create.assert_called_once_with( - name, domain=mock__get_domain_id.return_value) - - @ddt.data({"domain_name": "domain", "user_id": "user", "project_id": "pr"}, - {"domain_name": None, "user_id": None, "project_id": None}) - @ddt.unpack - def test_list_roles(self, domain_name, user_id, project_id): - self.service._get_domain_id = mock.MagicMock() - self.assertEqual(self.kc.roles.list.return_value, - self.service.list_roles(user_id=user_id, - domain_name=domain_name, - project_id=project_id)) - domain = None - if domain_name: - self.service._get_domain_id.assert_called_once_with(domain_name) - domain = self.service._get_domain_id.return_value - else: - self.assertFalse(self.service._get_domain_id.called) - - self.kc.roles.list.assert_called_once_with(user=user_id, - domain=domain, - project=project_id) - - def test_add_role(self): - role_id = "fake_id" - user_id = "user_id" - project_id = "project_id" - - self.service.add_role(role_id, user_id=user_id, project_id=project_id) - self.kc.roles.grant.assert_called_once_with( - user=user_id, role=role_id, project=project_id) - - def test_revoke_role(self): - role_id = "fake_id" - user_id = "user_id" - project_id = "tenant_id" - - self.service.revoke_role(role_id, user_id=user_id, - project_id=project_id) - - self.kc.roles.revoke.assert_called_once_with( - user=user_id, role=role_id, project=project_id) - - def test_get_role(self): - role_id = "fake_id" - self.service.get_role(role_id) - self.kc.roles.get.assert_called_once_with(role_id) - - def test_create_domain(self): - name = "some_domain" - descr = "descr" - enabled = False - - self.service.create_domain(name, description=descr, enabled=enabled) - self.kc.domains.create.assert_called_once_with( - name, description=descr, enabled=enabled) - - def test_create_ec2credentials(self): - user_id = "fake_id" - project_id = "fake_id" - - self.assertEqual(self.kc.ec2.create.return_value, - self.service.create_ec2credentials( - user_id, project_id=project_id)) - self.kc.ec2.create.assert_called_once_with(user_id, - project_id=project_id) - - -@ddt.ddt -class UnifiedKeystoneV3ServiceTestCase(test.TestCase): - def setUp(self): - super(UnifiedKeystoneV3ServiceTestCase, self).setUp() - 
self.clients = mock.MagicMock() - self.service = keystone_v3.UnifiedKeystoneV3Service(self.clients) - self.service._impl = mock.MagicMock() - - def test_init_identity_service(self): - self.clients.keystone.return_value.version = "v3" - self.assertIsInstance(identity.Identity(self.clients)._impl, - keystone_v3.UnifiedKeystoneV3Service) - - def test__unify_project(self): - class KeystoneV3Project(object): - def __init__(self): - self.id = str(uuid.uuid4()) - self.name = str(uuid.uuid4()) - self.domain_id = str(uuid.uuid4()) - - project = KeystoneV3Project() - unified_project = self.service._unify_project(project) - self.assertIsInstance(unified_project, identity.Project) - self.assertEqual(project.id, unified_project.id) - self.assertEqual(project.name, unified_project.name) - self.assertEqual(project.domain_id, unified_project.domain_id) - - def test__unify_user(self): - class KeystoneV3User(object): - def __init__(self, project_id=None): - self.id = str(uuid.uuid4()) - self.name = str(uuid.uuid4()) - self.domain_id = str(uuid.uuid4()) - if project_id is not None: - self.default_project_id = project_id - - user = KeystoneV3User() - - unified_user = self.service._unify_user(user) - self.assertIsInstance(unified_user, identity.User) - self.assertEqual(user.id, unified_user.id) - self.assertEqual(user.name, unified_user.name) - self.assertEqual(user.domain_id, unified_user.domain_id) - self.assertIsNone(unified_user.project_id) - - project_id = "tenant_id" - user = KeystoneV3User(project_id=project_id) - unified_user = self.service._unify_user(user) - self.assertIsInstance(unified_user, identity.User) - self.assertEqual(user.id, unified_user.id) - self.assertEqual(user.name, unified_user.name) - self.assertEqual(user.domain_id, unified_user.domain_id) - self.assertEqual(project_id, unified_user.project_id) - - @mock.patch("%s.UnifiedKeystoneV3Service._unify_project" % PATH) - def test_create_project(self, - mock_unified_keystone_v3_service__unify_project): - mock_unify_project = mock_unified_keystone_v3_service__unify_project - name = "name" - domain = "domain" - - self.assertEqual(mock_unify_project.return_value, - self.service.create_project(name, domain_name=domain)) - mock_unify_project.assert_called_once_with( - self.service._impl.create_project.return_value) - self.service._impl.create_project.assert_called_once_with( - name, domain_name=domain) - - def test_update_project(self): - project_id = "fake_id" - name = "name" - description = "descr" - enabled = False - - self.service.update_project(project_id=project_id, name=name, - description=description, enabled=enabled) - self.service._impl.update_project.assert_called_once_with( - project_id=project_id, name=name, description=description, - enabled=enabled) - - def test_delete_project(self): - project_id = "fake_id" - self.service.delete_project(project_id) - self.service._impl.delete_project.assert_called_once_with(project_id) - - @mock.patch("%s.UnifiedKeystoneV3Service._unify_project" % PATH) - def test_get_project(self, - mock_unified_keystone_v3_service__unify_project): - mock_unify_project = mock_unified_keystone_v3_service__unify_project - project_id = "id" - - self.assertEqual(mock_unify_project.return_value, - self.service.get_project(project_id)) - mock_unify_project.assert_called_once_with( - self.service._impl.get_project.return_value) - self.service._impl.get_project.assert_called_once_with(project_id) - -
@mock.patch("%s.UnifiedKeystoneV3Service._unify_project" % PATH) - def test_list_projects(self, - mock_unified_keystone_v3_service__unify_project): - mock_unify_project = mock_unified_keystone_v3_service__unify_project - - projects = [mock.MagicMock()] - self.service._impl.list_projects.return_value = projects - - self.assertEqual([mock_unify_project.return_value], - self.service.list_projects()) - mock_unify_project.assert_called_once_with(projects[0]) - - @mock.patch("%s.UnifiedKeystoneV3Service._unify_user" % PATH) - def test_create_user(self, mock_unified_keystone_v3_service__unify_user): - mock_unify_user = mock_unified_keystone_v3_service__unify_user - - name = "name" - password = "passwd" - project_id = "project" - domain_name = "domain" - default_role = "role" - - self.assertEqual(mock_unify_user.return_value, - self.service.create_user(name, password=password, - project_id=project_id, - domain_name=domain_name, - default_role=default_role)) - mock_unify_user.assert_called_once_with( - self.service._impl.create_user.return_value) - self.service._impl.create_user.assert_called_once_with( - username=name, password=password, project_id=project_id, - domain_name=domain_name, default_role=default_role, enabled=True) - - @mock.patch("%s.UnifiedKeystoneV3Service._unify_user" % PATH) - def test_create_users(self, mock_unified_keystone_v3_service__unify_user): - project_id = "project" - n = 3 - domain_name = "Default" - - self.service.create_users( - project_id, number_of_users=3, - user_create_args={"domain_name": domain_name}) - self.service._impl.create_users.assert_called_once_with( - project_id=project_id, number_of_users=n, - user_create_args={"domain_name": domain_name}) - - @mock.patch("%s.UnifiedKeystoneV3Service._unify_user" % PATH) - def test_list_users(self, mock_unified_keystone_v3_service__unify_user): - mock_unify_user = mock_unified_keystone_v3_service__unify_user - - users = [mock.MagicMock()] - self.service._impl.list_users.return_value = users - - self.assertEqual([mock_unify_user.return_value], - self.service.list_users()) - mock_unify_user.assert_called_once_with(users[0]) - - @ddt.data({"user_id": "id", "enabled": False, "name": "Fake", - "email": "badboy@example.com", "password": "pass"}, - {"user_id": "id", "enabled": None, "name": None, - "email": None, "password": None}) - @ddt.unpack - def test_update_user(self, user_id, enabled, name, email, password): - self.service.update_user(user_id, enabled=enabled, name=name, - email=email, password=password) - self.service._impl.update_user.assert_called_once_with( - user_id, enabled=enabled, name=name, email=email, - password=password) - - @mock.patch("%s.UnifiedKeystoneV3Service._unify_service" % PATH) - def test_list_services(self, - mock_unified_keystone_v3_service__unify_service): - mock_unify_service = mock_unified_keystone_v3_service__unify_service - - services = [mock.MagicMock()] - self.service._impl.list_services.return_value = services - - self.assertEqual([mock_unify_service.return_value], - self.service.list_services()) - mock_unify_service.assert_called_once_with(services[0]) - - @mock.patch("%s.UnifiedKeystoneV3Service._unify_role" % PATH) - def test_create_role(self, mock_unified_keystone_v3_service__unify_role): - mock_unify_role = mock_unified_keystone_v3_service__unify_role - name = "some" - domain = "some" - - self.assertEqual(mock_unify_role.return_value, - self.service.create_role(name, domain_name=domain)) - - self.service._impl.create_role.assert_called_once_with( - name, domain_name=domain) - 
mock_unify_role.assert_called_once_with( - self.service._impl.create_role.return_value) - - def test_add_role(self): - role_id = "fake_id" - user_id = "user_id" - project_id = "user_id" - - self.service.add_role(role_id, user_id=user_id, project_id=project_id) - - self.service._impl.add_role.assert_called_once_with( - user_id=user_id, role_id=role_id, project_id=project_id) - - def test_revoke_role(self): - role_id = "fake_id" - user_id = "user_id" - project_id = "user_id" - - self.service.revoke_role(role_id, user_id=user_id, - project_id=project_id) - - self.service._impl.revoke_role.assert_called_once_with( - user_id=user_id, role_id=role_id, project_id=project_id) - - @mock.patch("%s.UnifiedKeystoneV3Service._unify_role" % PATH) - def test_list_roles(self, mock_unified_keystone_v3_service__unify_role): - mock_unify_role = mock_unified_keystone_v3_service__unify_role - - roles = [mock.MagicMock()] - self.service._impl.list_roles.return_value = roles - - self.assertEqual([mock_unify_role.return_value], - self.service.list_roles()) - mock_unify_role.assert_called_once_with(roles[0]) - - def test_create_ec2credentials(self): - user_id = "id" - project_id = "project-id" - - self.assertEqual(self.service._impl.create_ec2credentials.return_value, - self.service.create_ec2credentials( - user_id=user_id, project_id=project_id)) - - self.service._impl.create_ec2credentials.assert_called_once_with( - user_id=user_id, project_id=project_id) diff --git a/tests/unit/plugins/openstack/services/image/__init__.py b/tests/unit/plugins/openstack/services/image/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/services/image/test_glance_common.py b/tests/unit/plugins/openstack/services/image/test_glance_common.py deleted file mode 100644 index 71b3f67e9d..0000000000 --- a/tests/unit/plugins/openstack/services/image/test_glance_common.py +++ /dev/null @@ -1,127 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import uuid - -from glanceclient import exc as glance_exc -import mock - -from rally import exceptions -from rally.plugins.openstack import service -from rally.plugins.openstack.services.image import glance_common -from rally.plugins.openstack.services.image import image -from tests.unit import test - - -class FullGlance(service.Service, glance_common.GlanceMixin): - """Implementation of GlanceMixin with Service base class.""" - pass - - -class GlanceMixinTestCase(test.TestCase): - def setUp(self): - super(GlanceMixinTestCase, self).setUp() - self.clients = mock.MagicMock() - self.glance = self.clients.glance.return_value - self.name_generator = mock.MagicMock() - self.version = "some" - self.service = FullGlance( - clients=self.clients, name_generator=self.name_generator) - self.service.version = self.version - - def test__get_client(self): - self.assertEqual(self.glance, - self.service._get_client()) - - def test_get_image(self): - image = "image_id" - self.assertEqual(self.glance.images.get.return_value, - self.service.get_image(image)) - self.glance.images.get.assert_called_once_with(image) - - def test_get_image_exception(self): - image_id = "image_id" - self.glance.images.get.side_effect = glance_exc.HTTPNotFound - - self.assertRaises(exceptions.GetResourceNotFound, - self.service.get_image, image_id) - - def test_delete_image(self): - image = "image_id" - self.service.delete_image(image) - self.glance.images.delete.assert_called_once_with(image) - - def test_download_image(self): - image_id = "image_id" - self.service.download_image(image_id) - self.glance.images.data.assert_called_once_with(image_id, - do_checksum=True) - - -class FullUnifiedGlance(glance_common.UnifiedGlanceMixin, - service.Service): - """Implementation of UnifiedGlanceMixin with Service base class.""" - pass - - -class UnifiedGlanceMixinTestCase(test.TestCase): - def setUp(self): - super(UnifiedGlanceMixinTestCase, self).setUp() - self.clients = mock.MagicMock() - self.name_generator = mock.MagicMock() - self.impl = mock.MagicMock() - self.version = "some" - self.service = FullUnifiedGlance( - clients=self.clients, name_generator=self.name_generator) - self.service._impl = self.impl - self.service.version = self.version - - def test__unify_image(self): - class Image(object): - def __init__(self, visibility=None, is_public=None, status=None): - self.id = uuid.uuid4() - self.name = str(uuid.uuid4()) - self.visibility = visibility - self.is_public = is_public - self.status = status - - visibility = "private" - image_obj = Image(visibility=visibility) - unified_image = self.service._unify_image(image_obj) - self.assertIsInstance(unified_image, image.UnifiedImage) - self.assertEqual(image_obj.id, unified_image.id) - self.assertEqual(image_obj.visibility, unified_image.visibility) - - image_obj = Image(is_public="public") - del image_obj.visibility - unified_image = self.service._unify_image(image_obj) - self.assertEqual(image_obj.id, unified_image.id) - self.assertEqual(image_obj.is_public, unified_image.visibility) - - def test_get_image(self): - image_id = "image_id" - self.service.get_image(image=image_id) - self.service._impl.get_image.assert_called_once_with(image=image_id) - - def test_delete_image(self): - image_id = "image_id" - self.service.delete_image(image_id) - self.service._impl.delete_image.assert_called_once_with( - image_id=image_id) - - def test_download_image(self): - image_id = "image_id" - self.service.download_image(image_id) - self.service._impl.download_image.assert_called_once_with( - 
image_id, do_checksum=True) diff --git a/tests/unit/plugins/openstack/services/image/test_glance_v1.py b/tests/unit/plugins/openstack/services/image/test_glance_v1.py deleted file mode 100755 index 13964fb214..0000000000 --- a/tests/unit/plugins/openstack/services/image/test_glance_v1.py +++ /dev/null @@ -1,201 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import tempfile - -import ddt -import fixtures -import mock - -from rally.plugins.openstack.services.image import glance_v1 -from rally.plugins.openstack.services.image import image -from tests.unit import test - - -PATH = ("rally.plugins.openstack.services.image.glance_common." - "UnifiedGlanceMixin._unify_image") - - -@ddt.ddt -class GlanceV1ServiceTestCase(test.TestCase): - _tempfile = tempfile.NamedTemporaryFile() - - def setUp(self): - super(GlanceV1ServiceTestCase, self).setUp() - self.clients = mock.MagicMock() - self.gc = self.clients.glance.return_value - self.name_generator = mock.MagicMock() - self.service = glance_v1.GlanceV1Service( - self.clients, name_generator=self.name_generator) - self.mock_wait_for_status = fixtures.MockPatch( - "rally.task.utils.wait_for_status") - self.useFixture(self.mock_wait_for_status) - - @ddt.data({"location": "image_location", "is_public": True}, - {"location": _tempfile.name, "is_public": False}) - @ddt.unpack - @mock.patch("six.moves.builtins.open") - def test_create_image(self, mock_open, location, is_public): - image_name = "image_name" - container_format = "container_format" - disk_format = "disk_format" - properties = {"fakeprop": "fake"} - - image = self.service.create_image( - image_name=image_name, - container_format=container_format, - image_location=location, - disk_format=disk_format, - is_public=is_public, - properties=properties) - - call_args = {"container_format": container_format, - "disk_format": disk_format, - "is_public": is_public, - "name": image_name, - "min_disk": 0, - "min_ram": 0, - "properties": properties} - - if location.startswith("/"): - call_args["data"] = mock_open.return_value - mock_open.assert_called_once_with(location) - mock_open.return_value.close.assert_called_once_with() - else: - call_args["copy_from"] = location - - self.gc.images.create.assert_called_once_with(**call_args) - self.assertEqual(image, self.mock_wait_for_status.mock.return_value) - - @ddt.data({"image_name": None}, - {"image_name": "test_image_name"}) - @ddt.unpack - def test_update_image(self, image_name): - image_id = "image_id" - min_disk = 0 - min_ram = 0 - expected_image_name = image_name or self.name_generator.return_value - - image = self.service.update_image(image_id=image_id, - image_name=image_name, - min_disk=min_disk, - min_ram=min_ram) - self.assertEqual(self.gc.images.update.return_value, image) - self.gc.images.update.assert_called_once_with(image_id, - name=expected_image_name, - min_disk=min_disk, - min_ram=min_ram) - - @ddt.data({"status": "activate", "is_public": True, "owner": "owner"}, - {"status": "activate", "is_public": False, 
"owner": "owner"}, - {"status": "activate", "is_public": None, "owner": "owner"}) - @ddt.unpack - def test_list_images(self, status, is_public, owner): - self.service.list_images(is_public=is_public, status=status, - owner=owner) - self.gc.images.list.assert_called_once_with(status=status, - owner=owner, - is_public=is_public) - - def test_set_visibility(self): - image_id = "image_id" - is_public = True - self.service.set_visibility(image_id=image_id) - self.gc.images.update.assert_called_once_with( - image_id, is_public=is_public) - - -@ddt.ddt -class UnifiedGlanceV1ServiceTestCase(test.TestCase): - def setUp(self): - super(UnifiedGlanceV1ServiceTestCase, self).setUp() - self.clients = mock.MagicMock() - self.service = glance_v1.UnifiedGlanceV1Service(self.clients) - self.service._impl = mock.create_autospec(self.service._impl) - - @ddt.data({"visibility": "public"}, - {"visibility": "private"}) - @ddt.unpack - @mock.patch(PATH) - def test_create_image(self, mock_image__unify_image, visibility): - image_name = "image_name" - container_format = "container_format" - image_location = "image_location" - disk_format = "disk_format" - properties = {"fakeprop": "fake"} - - image = self.service.create_image(image_name=image_name, - container_format=container_format, - image_location=image_location, - disk_format=disk_format, - visibility=visibility, - properties=properties) - - is_public = visibility == "public" - callargs = {"image_name": image_name, - "container_format": container_format, - "image_location": image_location, - "disk_format": disk_format, - "is_public": is_public, - "min_disk": 0, - "min_ram": 0, - "properties": properties} - self.service._impl.create_image.assert_called_once_with(**callargs) - self.assertEqual(mock_image__unify_image.return_value, image) - - @mock.patch(PATH) - def test_update_image(self, mock_image__unify_image): - image_id = "image_id" - image_name = "image_name" - callargs = {"image_id": image_id, - "image_name": image_name, - "min_disk": 0, - "min_ram": 0} - - image = self.service.update_image(image_id, - image_name=image_name) - - self.assertEqual(mock_image__unify_image.return_value, image) - self.service._impl.update_image.assert_called_once_with(**callargs) - - @mock.patch(PATH) - def test_list_images(self, mock_image__unify_image): - images = [mock.MagicMock()] - self.service._impl.list_images.return_value = images - - status = "active" - visibility = "public" - is_public = visibility == "public" - self.assertEqual([mock_image__unify_image.return_value], - self.service.list_images(status, - visibility=visibility)) - self.service._impl.list_images.assert_called_once_with( - status=status, - is_public=is_public) - - def test_set_visibility(self): - image_id = "image_id" - visibility = "private" - is_public = visibility == "public" - self.service.set_visibility(image_id=image_id, visibility=visibility) - self.service._impl.set_visibility.assert_called_once_with( - image_id=image_id, is_public=is_public) - - def test_set_visibility_failure(self): - image_id = "image_id" - visibility = "error" - self.assertRaises(image.VisibilityException, - self.service.set_visibility, - image_id=image_id, - visibility=visibility) diff --git a/tests/unit/plugins/openstack/services/image/test_glance_v2.py b/tests/unit/plugins/openstack/services/image/test_glance_v2.py deleted file mode 100755 index bd135806dc..0000000000 --- a/tests/unit/plugins/openstack/services/image/test_glance_v2.py +++ /dev/null @@ -1,229 +0,0 @@ -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import tempfile - -import ddt -import fixtures -import mock - -from rally.plugins.openstack.services.image import glance_v2 -from tests.unit import test - - -PATH = "rally.plugins.openstack.services.image" - - -@ddt.ddt -class GlanceV2ServiceTestCase(test.TestCase): - _tempfile = tempfile.NamedTemporaryFile() - - def setUp(self): - super(GlanceV2ServiceTestCase, self).setUp() - self.clients = mock.MagicMock() - self.gc = self.clients.glance.return_value - self.name_generator = mock.MagicMock() - self.service = glance_v2.GlanceV2Service( - self.clients, name_generator=self.name_generator) - self.mock_wait_for_status = fixtures.MockPatch( - "rally.task.utils.wait_for_status") - self.useFixture(self.mock_wait_for_status) - - @ddt.data({"location": "image_location"}, - {"location": _tempfile.name}) - @ddt.unpack - @mock.patch("requests.get") - @mock.patch("six.moves.builtins.open") - def test_upload(self, mock_open, mock_requests_get, location): - image_id = "foo" - - self.service.upload_data(image_id, image_location=location) - - if location.startswith("/"): - mock_open.assert_called_once_with(location) - mock_open.return_value.close.assert_called_once_with() - self.gc.images.upload.assert_called_once_with( - image_id, mock_open.return_value) - else: - mock_requests_get.assert_called_once_with(location, stream=True) - self.gc.images.upload.assert_called_once_with( - image_id, mock_requests_get.return_value.raw) - - @mock.patch("%s.glance_v2.GlanceV2Service.upload_data" % PATH) - def test_create_image(self, mock_upload_data): - image_name = "image_name" - container_format = "container_format" - disk_format = "disk_format" - visibility = "public" - properties = {"fakeprop": "fake"} - location = "location" - - image = self.service.create_image( - image_name=image_name, - container_format=container_format, - image_location=location, - disk_format=disk_format, - visibility=visibility, - properties=properties) - - call_args = {"container_format": container_format, - "disk_format": disk_format, - "name": image_name, - "visibility": visibility, - "min_disk": 0, - "min_ram": 0, - "fakeprop": "fake"} - self.gc.images.create.assert_called_once_with(**call_args) - self.assertEqual(image, self.mock_wait_for_status.mock.return_value) - mock_upload_data.assert_called_once_with( - self.mock_wait_for_status.mock.return_value.id, - image_location=location) - - def test_update_image(self): - image_id = "image_id" - image_name1 = self.name_generator.return_value - image_name2 = "image_name" - min_disk = 0 - min_ram = 0 - remove_props = None - - # case: image_name is None: - call_args1 = {"image_id": image_id, - "name": image_name1, - "min_disk": min_disk, - "min_ram": min_ram, - "remove_props": remove_props} - image1 = self.service.update_image(image_id=image_id, - image_name=None, - min_disk=min_disk, - min_ram=min_ram, - remove_props=remove_props) - self.assertEqual(self.gc.images.update.return_value, image1) - 
self.gc.images.update.assert_called_once_with(**call_args1) - - # case: image_name is not None: - call_args2 = {"image_id": image_id, - "name": image_name2, - "min_disk": min_disk, - "min_ram": min_ram, - "remove_props": remove_props} - image2 = self.service.update_image(image_id=image_id, - image_name=image_name2, - min_disk=min_disk, - min_ram=min_ram, - remove_props=remove_props) - self.assertEqual(self.gc.images.update.return_value, image2) - self.gc.images.update.assert_called_with(**call_args2) - - def test_list_images(self): - status = "active" - kwargs = {"status": status} - filters = {"filters": kwargs} - self.gc.images.list.return_value = iter([1, 2, 3]) - - self.assertEqual([1, 2, 3], self.service.list_images()) - self.gc.images.list.assert_called_once_with(**filters) - - def test_set_visibility(self): - image_id = "image_id" - visibility = "shared" - self.service.set_visibility(image_id=image_id) - self.gc.images.update.assert_called_once_with( - image_id, - visibility=visibility) - - def test_deactivate_image(self): - image_id = "image_id" - self.service.deactivate_image(image_id) - self.gc.images.deactivate.assert_called_once_with(image_id) - - def test_reactivate_image(self): - image_id = "image_id" - self.service.reactivate_image(image_id) - self.gc.images.reactivate.assert_called_once_with(image_id) - - -@ddt.ddt -class UnifiedGlanceV2ServiceTestCase(test.TestCase): - def setUp(self): - super(UnifiedGlanceV2ServiceTestCase, self).setUp() - self.clients = mock.MagicMock() - self.service = glance_v2.UnifiedGlanceV2Service(self.clients) - self.service._impl = mock.create_autospec(self.service._impl) - - @mock.patch("%s.glance_common.UnifiedGlanceMixin._unify_image" % PATH) - def test_create_image(self, mock_image__unify_image): - image_name = "image_name" - container_format = "container_format" - image_location = "image_location" - disk_format = "disk_format" - visibility = "public" - properties = {"fakeprop": "fake"} - callargs = {"image_name": image_name, - "container_format": container_format, - "image_location": image_location, - "disk_format": disk_format, - "visibility": visibility, - "min_disk": 0, - "min_ram": 0, - "properties": properties} - - image = self.service.create_image(image_name=image_name, - container_format=container_format, - image_location=image_location, - disk_format=disk_format, - visibility=visibility, - properties=properties) - - self.assertEqual(mock_image__unify_image.return_value, image) - self.service._impl.create_image.assert_called_once_with(**callargs) - - @mock.patch("%s.glance_common.UnifiedGlanceMixin._unify_image" % PATH) - def test_update_image(self, mock_image__unify_image): - image_id = "image_id" - image_name = "image_name" - callargs = {"image_id": image_id, - "image_name": image_name, - "min_disk": 0, - "min_ram": 0, - "remove_props": None} - - image = self.service.update_image(image_id, - image_name=image_name) - - self.assertEqual(mock_image__unify_image.return_value, image) - self.service._impl.update_image.assert_called_once_with(**callargs) - - @mock.patch("%s.glance_common.UnifiedGlanceMixin._unify_image" % PATH) - def test_list_images(self, mock_image__unify_image): - images = [mock.MagicMock()] - self.service._impl.list_images.return_value = images - - status = "active" - self.assertEqual([mock_image__unify_image.return_value], - self.service.list_images(owner="foo", - visibility="shared")) - self.service._impl.list_images.assert_called_once_with( - status=status, - visibility="shared", - owner="foo" - ) - - def 
test_set_visibility(self): - image_id = "image_id" - visibility = "private" - - self.service.set_visibility(image_id=image_id, visibility=visibility) - self.service._impl.set_visibility.assert_called_once_with( - image_id=image_id, visibility=visibility) diff --git a/tests/unit/plugins/openstack/services/image/test_image.py b/tests/unit/plugins/openstack/services/image/test_image.py deleted file mode 100755 index dbacc22171..0000000000 --- a/tests/unit/plugins/openstack/services/image/test_image.py +++ /dev/null @@ -1,120 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import mock - -from rally.plugins.openstack.services.image import glance_v1 -from rally.plugins.openstack.services.image import glance_v2 -from rally.plugins.openstack.services.image import image -from tests.unit import test - - -@ddt.ddt -class ImageTestCase(test.TestCase): - - def setUp(self): - super(ImageTestCase, self).setUp() - self.clients = mock.MagicMock() - - def get_service_with_fake_impl(self): - path = "rally.plugins.openstack.services.image.image" - with mock.patch("%s.Image.discover_impl" % path) as mock_discover: - mock_discover.return_value = mock.MagicMock(), None - service = image.Image(self.clients) - return service - - @ddt.data(("image_name", "container_format", "image_location", - "disk_format", "visibility", "min_disk", "min_ram")) - def test_create_image(self, params): - (image_name, container_format, image_location, disk_format, - visibility, min_disk, min_ram) = params - service = self.get_service_with_fake_impl() - properties = {"fakeprop": "fake"} - - service.create_image(image_name=image_name, - container_format=container_format, - image_location=image_location, - disk_format=disk_format, - visibility=visibility, - min_disk=min_disk, - min_ram=min_ram, - properties=properties) - - service._impl.create_image.assert_called_once_with( - image_name=image_name, container_format=container_format, - image_location=image_location, disk_format=disk_format, - visibility=visibility, min_disk=min_disk, min_ram=min_ram, - properties=properties) - - @ddt.data(("image_id", "image_name", "min_disk", "min_ram", - "remove_props")) - def test_update_image(self, params): - (image_id, image_name, min_disk, min_ram, remove_props) = params - service = self.get_service_with_fake_impl() - service.update_image(image_id, - image_name=image_name, - min_disk=min_disk, - min_ram=min_ram, - remove_props=remove_props) - service._impl.update_image.assert_called_once_with( - image_id, image_name=image_name, min_disk=min_disk, - min_ram=min_ram, remove_props=remove_props) - - @ddt.data("image_id") - def test_get_image(self, param): - image_id = param - service = self.get_service_with_fake_impl() - service.get_image(image=image_id) - service._impl.get_image.assert_called_once_with(image_id) - - @ddt.data(("status", "visibility", "owner")) - def test_list_images(self, params): - status, visibility, owner = params - service = self.get_service_with_fake_impl() - 
service.list_images(status=status, visibility=visibility, owner=owner) - service._impl.list_images.assert_called_once_with( - status=status, visibility=visibility, owner=owner) - - @ddt.data(("image_id", "visibility")) - def test_set_visibility(self, params): - image_id, visibility = params - service = self.get_service_with_fake_impl() - service.set_visibility(image_id=image_id, visibility=visibility) - service._impl.set_visibility.assert_called_once_with( - image_id, visibility=visibility) - - def test_delete_image(self): - image_id = "image_id" - service = self.get_service_with_fake_impl() - service.delete_image(image_id=image_id) - service._impl.delete_image.assert_called_once_with(image_id) - - def test_download_image(self): - image_id = "image_id" - service = self.get_service_with_fake_impl() - service.download_image(image=image_id, do_checksum=True) - service._impl.download_image.assert_called_once_with(image_id, - do_checksum=True) - - def test_is_applicable(self): - clients = mock.Mock() - - clients.glance().version = "1.0" - self.assertTrue( - glance_v1.UnifiedGlanceV1Service.is_applicable(clients)) - - clients.glance().version = "2.0" - self.assertTrue( - glance_v2.UnifiedGlanceV2Service.is_applicable(clients)) diff --git a/tests/unit/plugins/openstack/services/storage/__init__.py b/tests/unit/plugins/openstack/services/storage/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/services/storage/test_block.py b/tests/unit/plugins/openstack/services/storage/test_block.py deleted file mode 100644 index 2e10691ae0..0000000000 --- a/tests/unit/plugins/openstack/services/storage/test_block.py +++ /dev/null @@ -1,280 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from rally.plugins.openstack.services.storage import block -from tests.unit import test - - -class BlockTestCase(test.TestCase): - def setUp(self): - super(BlockTestCase, self).setUp() - self.clients = mock.MagicMock() - self.service = self._get_service_with_fake_impl() - - def _get_service_with_fake_impl(self): - path = "rally.plugins.openstack.services.storage.block" - path = "%s.BlockStorage.discover_impl" % path - with mock.patch(path) as mock_discover: - mock_discover.return_value = mock.MagicMock(), None - service = block.BlockStorage(self.clients) - return service - - def test_create_volume(self): - self.assertEqual(self.service._impl.create_volume.return_value, - self.service.create_volume("fake_volume")) - self.service._impl.create_volume.assert_called_once_with( - "fake_volume", availability_zone=None, consistencygroup_id=None, - description=None, group_id=None, imageRef=None, metadata=None, - multiattach=False, name=None, project_id=None, - scheduler_hints=None, snapshot_id=None, source_replica=None, - source_volid=None, user_id=None, volume_type=None) - - def test_list_volumes(self): - self.assertEqual(self.service._impl.list_volumes.return_value, - self.service.list_volumes(detailed=True)) - self.service._impl.list_volumes.assert_called_once_with(detailed=True) - - def test_get_volume(self): - self.assertEqual(self.service._impl.get_volume.return_value, - self.service.get_volume(1)) - self.service._impl.get_volume.assert_called_once_with(1) - - def test_update_volume(self): - self.assertEqual(self.service._impl.update_volume.return_value, - self.service.update_volume(1, name="name", - description="desp")) - self.service._impl.update_volume.assert_called_once_with( - 1, name="name", description="desp") - - def test_delete_volume(self): - self.service.delete_volume("volume") - self.service._impl.delete_volume.assert_called_once_with("volume") - - def test_extend_volume(self): - self.assertEqual(self.service._impl.extend_volume.return_value, - self.service.extend_volume("volume", new_size=1)) - self.service._impl.extend_volume.assert_called_once_with("volume", - new_size=1) - - def test_list_snapshots(self): - self.assertEqual(self.service._impl.list_snapshots.return_value, - self.service.list_snapshots(detailed=True)) - self.service._impl.list_snapshots.assert_called_once_with( - detailed=True) - - def test_list_types(self): - self.assertEqual( - self.service._impl.list_types.return_value, - self.service.list_types(search_opts=None, is_public=None)) - self.service._impl.list_types.assert_called_once_with(is_public=None, - search_opts=None) - - def test_set_metadata(self): - self.assertEqual( - self.service._impl.set_metadata.return_value, - self.service.set_metadata("volume", sets=10, set_size=3)) - self.service._impl.set_metadata.assert_called_once_with( - "volume", set_size=3, sets=10) - - def test_delete_metadata(self): - keys = ["a", "b"] - self.service.delete_metadata("volume", keys=keys, deletes=10, - delete_size=3) - self.service._impl.delete_metadata.assert_called_once_with( - "volume", keys, delete_size=3, deletes=10) - - def test_update_readonly_flag(self): - self.assertEqual( - self.service._impl.update_readonly_flag.return_value, - self.service.update_readonly_flag("volume", read_only=True)) - self.service._impl.update_readonly_flag.assert_called_once_with( - "volume", read_only=True) - - def test_upload_volume_to_image(self): - self.assertEqual( - self.service._impl.upload_volume_to_image.return_value, - self.service.upload_volume_to_image("volume", 
- force=False, - container_format="bare", - disk_format="raw")) - self.service._impl.upload_volume_to_image.assert_called_once_with( - "volume", container_format="bare", disk_format="raw", force=False) - - def test_create_qos(self): - spaces = {"consumer": "both", - "write_iops_sec": "10", - "read_iops_sec": "1000"} - - self.assertEqual( - self.service._impl.create_qos.return_value, - self.service.create_qos(spaces) - ) - self.service._impl.create_qos.assert_called_once_with(spaces) - - def test_list_qos(self): - self.assertEqual( - self.service._impl.list_qos.return_value, - self.service.list_qos(True) - ) - self.service._impl.list_qos.assert_called_once_with(True) - - def test_get_qos(self): - self.assertEqual( - self.service._impl.get_qos.return_value, - self.service.get_qos("qos")) - self.service._impl.get_qos.assert_called_once_with("qos") - - def test_set_qos(self): - set_specs_args = {"test": "foo"} - self.assertEqual( - self.service._impl.set_qos.return_value, - self.service.set_qos(qos="qos", set_specs_args=set_specs_args)) - self.service._impl.set_qos.assert_called_once_with( - qos="qos", set_specs_args=set_specs_args) - - def test_qos_associate_type(self): - self.assertEqual( - self.service._impl.qos_associate_type.return_value, - self.service.qos_associate_type(qos_specs="fake_qos", - volume_type="fake_type")) - self.service._impl.qos_associate_type.assert_called_once_with( - "fake_qos", "fake_type") - - def test_qos_disassociate_type(self): - self.assertEqual( - self.service._impl.qos_disassociate_type.return_value, - self.service.qos_disassociate_type(qos_specs="fake_qos", - volume_type="fake_type")) - self.service._impl.qos_disassociate_type.assert_called_once_with( - "fake_qos", "fake_type") - - def test_create_snapshot(self): - self.assertEqual( - self.service._impl.create_snapshot.return_value, - self.service.create_snapshot(1, force=False, name=None, - description=None, metadata=None)) - self.service._impl.create_snapshot.assert_called_once_with( - 1, force=False, name=None, description=None, metadata=None) - - def test_delete_snapshot(self): - self.service.delete_snapshot("snapshot") - self.service._impl.delete_snapshot.assert_called_once_with("snapshot") - - def test_create_backup(self): - self.assertEqual( - self.service._impl.create_backup.return_value, - self.service.create_backup(1, container=None, - name=None, description=None, - incremental=False, force=False, - snapshot_id=None)) - self.service._impl.create_backup.assert_called_once_with( - 1, container=None, name=None, description=None, incremental=False, - force=False, snapshot_id=None) - - def test_delete_backup(self): - self.service.delete_backup("backup") - self.service._impl.delete_backup.assert_called_once_with("backup") - - def test_restore_backup(self): - self.assertEqual(self.service._impl.restore_backup.return_value, - self.service.restore_backup(1, volume_id=1)) - self.service._impl.restore_backup.assert_called_once_with( - 1, volume_id=1) - - def test_list_backups(self): - self.assertEqual(self.service._impl.list_backups.return_value, - self.service.list_backups(detailed=True)) - self.service._impl.list_backups.assert_called_once_with(detailed=True) - - def test_list_transfers(self): - self.assertEqual( - self.service._impl.list_transfers.return_value, - self.service.list_transfers(detailed=True, search_opts=None)) - self.service._impl.list_transfers.assert_called_once_with( - detailed=True, search_opts=None) - - def test_create_volume_type(self): - self.assertEqual( - 
self.service._impl.create_volume_type.return_value, - self.service.create_volume_type(name="type", - description=None, - is_public=True)) - self.service._impl.create_volume_type.assert_called_once_with( - name="type", description=None, is_public=True) - - def test_get_volume_type(self): - self.assertEqual( - self.service._impl.get_volume_type.return_value, - self.service.get_volume_type("volume_type")) - self.service._impl.get_volume_type.assert_called_once_with( - "volume_type") - - def test_delete_volume_type(self): - self.service.delete_volume_type("volume_type") - self.service._impl.delete_volume_type.assert_called_once_with( - "volume_type") - - def test_set_volume_type_keys(self): - self.assertEqual( - self.service._impl.set_volume_type_keys.return_value, - self.service.set_volume_type_keys("volume_type", - metadata="metadata")) - self.service._impl.set_volume_type_keys.assert_called_once_with( - "volume_type", "metadata") - - def test_transfer_create(self): - self.assertEqual(self.service._impl.transfer_create.return_value, - self.service.transfer_create(1, name="t")) - self.service._impl.transfer_create.assert_called_once_with( - 1, name="t") - - def test_transfer_accept(self): - self.assertEqual(self.service._impl.transfer_accept.return_value, - self.service.transfer_accept(1, auth_key=2)) - self.service._impl.transfer_accept.assert_called_once_with( - 1, auth_key=2) - - def test_create_encryption_type(self): - self.assertEqual( - self.service._impl.create_encryption_type.return_value, - self.service.create_encryption_type("type", specs=2)) - self.service._impl.create_encryption_type.assert_called_once_with( - "type", specs=2) - - def test_get_encryption_type(self): - self.assertEqual( - self.service._impl.get_encryption_type.return_value, - self.service.get_encryption_type("type")) - self.service._impl.get_encryption_type.assert_called_once_with( - "type") - - def test_list_encryption_type(self): - self.assertEqual(self.service._impl.list_encryption_type.return_value, - self.service.list_encryption_type(search_opts=None)) - self.service._impl.list_encryption_type.assert_called_once_with( - search_opts=None) - - def test_delete_encryption_type(self): - self.service.delete_encryption_type("type") - self.service._impl.delete_encryption_type.assert_called_once_with( - "type") - - def test_update_encryption_type(self): - self.assertEqual( - self.service._impl.update_encryption_type.return_value, - self.service.update_encryption_type("type", specs=3)) - self.service._impl.update_encryption_type.assert_called_once_with( - "type", specs=3) diff --git a/tests/unit/plugins/openstack/services/storage/test_cinder_common.py b/tests/unit/plugins/openstack/services/storage/test_cinder_common.py deleted file mode 100644 index b66e70e243..0000000000 --- a/tests/unit/plugins/openstack/services/storage/test_cinder_common.py +++ /dev/null @@ -1,705 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import uuid - -import ddt -import mock - -from rally.common import cfg -from rally import exceptions -from rally.plugins.openstack import service -from rally.plugins.openstack.services.storage import block -from rally.plugins.openstack.services.storage import cinder_common -from tests.unit import fakes -from tests.unit import test - -BASE_PATH = "rally.plugins.openstack.services.storage" -CONF = cfg.CONF - - -class FullCinder(service.Service, cinder_common.CinderMixin): - """Implementation of CinderMixin with Service base class.""" - pass - - -@ddt.ddt -class CinderMixinTestCase(test.ScenarioTestCase): - def setUp(self): - super(CinderMixinTestCase, self).setUp() - self.clients = mock.MagicMock() - self.cinder = self.clients.cinder.return_value - self.name_generator = uuid.uuid1 - self.version = "some" - self.service = FullCinder( - clients=self.clients, name_generator=self.name_generator) - self.service.version = self.version - - def atomic_actions(self): - return self.service._atomic_actions - - def test__get_client(self): - self.assertEqual(self.cinder, - self.service._get_client()) - - def test__update_resource_with_manage(self): - resource = mock.MagicMock(id=1, manager=mock.MagicMock()) - self.assertEqual(resource.manager.get.return_value, - self.service._update_resource(resource)) - resource.manager.get.assert_called_once_with( - resource.id) - - @ddt.data({"resource": block.Volume(id=1, name="vol", - size=1, status="st"), - "attr": "volumes"}, - {"resource": block.VolumeSnapshot(id=2, name="snapshot", - volume_id=1, status="st"), - "attr": "volume_snapshots"}, - {"resource": block.VolumeBackup(id=3, name="backup", - volume_id=1, status="st"), - "attr": "backups"}) - @ddt.unpack - def test__update_resource_with_no_manage(self, resource, attr): - self.assertEqual(getattr(self.cinder, attr).get.return_value, - self.service._update_resource(resource)) - getattr(self.cinder, attr).get.assert_called_once_with( - resource.id) - - def test__update_resource_with_not_found(self): - manager = mock.MagicMock() - resource = fakes.FakeResource(manager=manager, status="ERROR") - - class NotFoundException(Exception): - http_status = 404 - - manager.get = mock.MagicMock(side_effect=NotFoundException) - self.assertRaises(exceptions.GetResourceNotFound, - self.service._update_resource, resource) - - def test__update_resource_with_http_exception(self): - manager = mock.MagicMock() - resource = fakes.FakeResource(manager=manager, status="ERROR") - - class HTTPException(Exception): - pass - - manager.get = mock.MagicMock(side_effect=HTTPException) - self.assertRaises(exceptions.GetResourceFailure, - self.service._update_resource, resource) - - def test__wait_available_volume(self): - volume = fakes.FakeVolume() - self.assertEqual(self.mock_wait_for_status.mock.return_value, - self.service._wait_available_volume(volume)) - - self.mock_wait_for_status.mock.assert_called_once_with( - volume, - ready_statuses=["available"], - update_resource=self.service._update_resource, - timeout=CONF.openstack.cinder_volume_create_timeout, - check_interval=CONF.openstack.cinder_volume_create_poll_interval - ) - - def test_list_volumes(self): - self.assertEqual(self.cinder.volumes.list.return_value, - self.service.list_volumes()) - self.cinder.volumes.list.assert_called_once_with(True) - - def test_get_volume(self): - self.assertEqual(self.cinder.volumes.get.return_value, - self.service.get_volume(1)) - self.cinder.volumes.get.assert_called_once_with(1) - - @mock.patch("%s.block.BlockStorage.create_volume" % BASE_PATH) 
- def test_delete_volume(self, mock_create_volume): - volume = mock_create_volume.return_value - self.service.delete_volume(volume) - - self.cinder.volumes.delete.assert_called_once_with(volume) - self.mock_wait_for_status.mock.assert_called_once_with( - volume, - ready_statuses=["deleted"], - check_deletion=True, - update_resource=self.service._update_resource, - timeout=CONF.openstack.cinder_volume_delete_timeout, - check_interval=CONF.openstack.cinder_volume_delete_poll_interval - ) - - @mock.patch("%s.block.BlockStorage.create_volume" % BASE_PATH) - def test_extend_volume(self, mock_create_volume): - volume = mock_create_volume.return_value - self.service._wait_available_volume = mock.MagicMock() - self.service._wait_available_volume.return_value = fakes.FakeVolume() - - self.assertEqual(self.service._wait_available_volume.return_value, - self.service.extend_volume(volume, 1)) - - self.cinder.volumes.extend.assert_called_once_with(volume, 1) - self.service._wait_available_volume.assert_called_once_with(volume) - - def test_list_snapshots(self): - self.assertEqual(self.cinder.volume_snapshots.list.return_value, - self.service.list_snapshots()) - self.cinder.volume_snapshots.list.assert_called_once_with(True) - - def test_set_metadata(self): - volume = fakes.FakeVolume() - - self.service.set_metadata(volume, sets=2, set_size=4) - calls = self.cinder.volumes.set_metadata.call_args_list - self.assertEqual(2, len(calls)) - for call in calls: - call_volume, metadata = call[0] - self.assertEqual(volume, call_volume) - self.assertEqual(4, len(metadata)) - - def test_delete_metadata(self): - volume = fakes.FakeVolume() - - keys = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l"] - self.service.delete_metadata(volume, keys, deletes=3, delete_size=4) - calls = self.cinder.volumes.delete_metadata.call_args_list - self.assertEqual(3, len(calls)) - all_deleted = [] - for call in calls: - call_volume, del_keys = call[0] - self.assertEqual(volume, call_volume) - self.assertEqual(4, len(del_keys)) - for key in del_keys: - self.assertIn(key, keys) - self.assertNotIn(key, all_deleted) - all_deleted.append(key) - - def test_delete_metadata_not_enough_keys(self): - volume = fakes.FakeVolume() - - keys = ["a", "b", "c", "d", "e"] - self.assertRaises(exceptions.InvalidArgumentsException, - self.service.delete_metadata, - volume, keys, deletes=2, delete_size=3) - - def test_update_readonly_flag(self): - fake_volume = mock.MagicMock() - self.service.update_readonly_flag(fake_volume, "fake_flag") - self.cinder.volumes.update_readonly_flag.assert_called_once_with( - fake_volume, "fake_flag") - - @mock.patch("rally.plugins.openstack.services.image.image.Image") - def test_upload_volume_to_image(self, mock_image): - volume = mock.Mock() - image = {"os-volume_upload_image": {"image_id": 1}} - self.cinder.volumes.upload_to_image.return_value = (None, image) - glance = mock_image.return_value - - self.service.generate_random_name = mock.Mock( - return_value="test_vol") - self.service.upload_volume_to_image(volume, False, - "container", "disk") - - self.cinder.volumes.upload_to_image.assert_called_once_with( - volume, False, "test_vol", "container", "disk") - self.mock_wait_for_status.mock.assert_has_calls([ - mock.call( - volume, - ready_statuses=["available"], - update_resource=self.service._update_resource, - timeout=CONF.openstack.cinder_volume_create_timeout, - check_interval=CONF.openstack. 
- cinder_volume_create_poll_interval), - mock.call( - glance.get_image.return_value, - ready_statuses=["active"], - update_resource=glance.get_image, - timeout=CONF.openstack.glance_image_create_timeout, - check_interval=CONF.openstack. - glance_image_create_poll_interval) - ]) - glance.get_image.assert_called_once_with(1) - - def test_create_qos(self): - specs = {"consumer": "both", - "write_iops_sec": "10", - "read_iops_sec": "1000"} - random_name = "random_name" - self.service.generate_random_name = mock.MagicMock( - return_value=random_name) - - result = self.service.create_qos(specs) - self.assertEqual( - self.cinder.qos_specs.create.return_value, - result - ) - self.cinder.qos_specs.create.assert_called_once_with(random_name, - specs) - - def test_list_qos(self): - result = self.service.list_qos(True) - self.assertEqual( - self.cinder.qos_specs.list.return_value, - result - ) - self.cinder.qos_specs.list.assert_called_once_with(True) - - def test_get_qos(self): - result = self.service.get_qos("qos") - self.assertEqual( - self.cinder.qos_specs.get.return_value, - result) - self.cinder.qos_specs.get.assert_called_once_with("qos") - - def test_set_qos(self): - set_specs_args = {"test": "foo"} - result = self.service.set_qos("qos", set_specs_args) - self.assertEqual( - self.cinder.qos_specs.set_keys.return_value, - result) - self.cinder.qos_specs.set_keys.assert_called_once_with("qos", - set_specs_args) - - def test_qos_associate_type(self): - self.service.qos_associate_type("qos", "type_id") - self.cinder.qos_specs.associate.assert_called_once_with( - "qos", "type_id") - - def test_qos_disassociate_type(self): - self.service.qos_disassociate_type("qos", "type_id") - self.cinder.qos_specs.disassociate.assert_called_once_with( - "qos", "type_id") - - def test_delete_snapshot(self): - snapshot = mock.Mock() - self.service.delete_snapshot(snapshot) - self.cinder.volume_snapshots.delete.assert_called_once_with(snapshot) - self.mock_wait_for_status.mock.assert_called_once_with( - snapshot, - ready_statuses=["deleted"], - check_deletion=True, - update_resource=self.service._update_resource, - timeout=cfg.CONF.openstack.cinder_volume_create_timeout, - check_interval=cfg.CONF.openstack - .cinder_volume_create_poll_interval) - - def test_delete_backup(self): - backup = mock.Mock() - self.service.delete_backup(backup) - self.cinder.backups.delete.assert_called_once_with(backup) - self.mock_wait_for_status.mock.assert_called_once_with( - backup, - ready_statuses=["deleted"], - check_deletion=True, - update_resource=self.service._update_resource, - timeout=cfg.CONF.openstack.cinder_volume_create_timeout, - check_interval=cfg.CONF.openstack - .cinder_volume_create_poll_interval) - - def test_restore_backup(self): - backup = mock.Mock() - self.service._wait_available_volume = mock.MagicMock() - self.service._wait_available_volume.return_value = mock.Mock() - - return_restore = self.service.restore_backup(backup.id, None) - - self.cinder.restores.restore.assert_called_once_with(backup.id, None) - self.cinder.volumes.get.assert_called_once_with( - self.cinder.restores.restore.return_value.volume_id) - self.service._wait_available_volume.assert_called_once_with( - self.cinder.volumes.get.return_value) - self.assertEqual(self.service._wait_available_volume.return_value, - return_restore) - - def test_list_backups(self): - return_backups_list = self.service.list_backups() - self.assertEqual( - self.cinder.backups.list.return_value, - return_backups_list) - - def test_list_transfers(self): - 
return_transfers_list = self.service.list_transfers() - self.assertEqual( - self.cinder.transfers.list.return_value, - return_transfers_list) - - def test_get_volume_type(self): - self.assertEqual(self.cinder.volume_types.get.return_value, - self.service.get_volume_type("volume_type")) - self.cinder.volume_types.get.assert_called_once_with( - "volume_type") - - def test_delete_volume_type(self): - volume_type = mock.Mock() - self.service.delete_volume_type(volume_type) - self.cinder.volume_types.delete.assert_called_once_with( - volume_type) - - def test_set_volume_type_keys(self): - volume_type = mock.Mock() - self.assertEqual(volume_type.set_keys.return_value, - self.service.set_volume_type_keys( - volume_type, metadata="metadata")) - - volume_type.set_keys.assert_called_once_with("metadata") - - def test_transfer_create(self): - fake_volume = mock.MagicMock() - random_name = "random_name" - self.service.generate_random_name = mock.MagicMock( - return_value=random_name) - result = self.service.transfer_create(fake_volume.id) - self.assertEqual( - self.cinder.transfers.create.return_value, - result) - self.cinder.transfers.create.assert_called_once_with( - fake_volume.id, name=random_name) - - def test_transfer_create_with_name(self): - fake_volume = mock.MagicMock() - result = self.service.transfer_create(fake_volume.id, name="t") - self.assertEqual( - self.cinder.transfers.create.return_value, - result) - self.cinder.transfers.create.assert_called_once_with( - fake_volume.id, name="t") - - def test_transfer_accept(self): - fake_transfer = mock.MagicMock() - result = self.service.transfer_accept(fake_transfer.id, "fake_key") - self.assertEqual( - self.cinder.transfers.accept.return_value, - result) - self.cinder.transfers.accept.assert_called_once_with( - fake_transfer.id, "fake_key") - - def test_create_encryption_type(self): - volume_type = mock.Mock() - specs = { - "provider": "foo_pro", - "cipher": "foo_cip", - "key_size": 512, - "control_location": "foo_con" - } - result = self.service.create_encryption_type(volume_type, specs) - - self.assertEqual( - self.cinder.volume_encryption_types.create.return_value, result) - self.cinder.volume_encryption_types.create.assert_called_once_with( - volume_type, specs) - - def test_get_encryption_type(self): - volume_type = mock.Mock() - result = self.service.get_encryption_type(volume_type) - - self.assertEqual( - self.cinder.volume_encryption_types.get.return_value, result) - self.cinder.volume_encryption_types.get.assert_called_once_with( - volume_type) - - def test_list_encryption_type(self): - return_encryption_types_list = self.service.list_encryption_type() - self.assertEqual(self.cinder.volume_encryption_types.list.return_value, - return_encryption_types_list) - - def test_delete_encryption_type(self): - resp = mock.MagicMock(status_code=202) - self.cinder.volume_encryption_types.delete.return_value = [resp] - self.service.delete_encryption_type("type") - self.cinder.volume_encryption_types.delete.assert_called_once_with( - "type") - - def test_delete_encryption_type_raise(self): - resp = mock.MagicMock(status_code=404) - self.cinder.volume_encryption_types.delete.return_value = [resp] - self.assertRaises(exceptions.RallyException, - self.service.delete_encryption_type, "type") - self.cinder.volume_encryption_types.delete.assert_called_once_with( - "type") - - def test_update_encryption_type(self): - volume_type = mock.Mock() - specs = { - "provider": "foo_pro", - "cipher": "foo_cip", - "key_size": 512, - "control_location": "foo_con" - } 
- result = self.service.update_encryption_type(volume_type, specs) - - self.assertEqual( - self.cinder.volume_encryption_types.update.return_value, result) - self.cinder.volume_encryption_types.update.assert_called_once_with( - volume_type, specs) - - -class FullUnifiedCinder(cinder_common.UnifiedCinderMixin, - service.Service): - """Implementation of UnifiedCinderMixin with Service base class.""" - pass - - -class UnifiedCinderMixinTestCase(test.TestCase): - def setUp(self): - super(UnifiedCinderMixinTestCase, self).setUp() - self.clients = mock.MagicMock() - self.name_generator = mock.MagicMock() - self.impl = mock.MagicMock() - self.version = "some" - self.service = FullUnifiedCinder( - clients=self.clients, name_generator=self.name_generator) - self.service._impl = self.impl - self.service.version = self.version - - def test__unify_backup(self): - class SomeBackup(object): - id = 1 - name = "backup" - volume_id = "volume" - status = "st" - backup = self.service._unify_backup(SomeBackup()) - self.assertEqual(1, backup.id) - self.assertEqual("backup", backup.name) - self.assertEqual("volume", backup.volume_id) - self.assertEqual("st", backup.status) - - def test__unify_transfer(self): - class SomeTransfer(object): - id = 1 - name = "transfer" - volume_id = "volume" - status = "st" - transfer = self.service._unify_backup(SomeTransfer()) - self.assertEqual(1, transfer.id) - self.assertEqual("transfer", transfer.name) - self.assertEqual("volume", transfer.volume_id) - self.assertEqual("st", transfer.status) - - def test__unify_qos(self): - class Qos(object): - id = 1 - name = "qos" - specs = {"key1": "value1"} - qos = self.service._unify_qos(Qos()) - self.assertEqual(1, qos.id) - self.assertEqual("qos", qos.name) - self.assertEqual({"key1": "value1"}, qos.specs) - - def test__unify_encryption_type(self): - class SomeEncryptionType(object): - encryption_id = 1 - volume_type_id = "volume_type" - encryption_type = self.service._unify_encryption_type( - SomeEncryptionType()) - self.assertEqual(1, encryption_type.id) - self.assertEqual("volume_type", encryption_type.volume_type_id) - - def test_delete_volume(self): - self.service.delete_volume("volume") - self.service._impl.delete_volume.assert_called_once_with("volume") - - def test_set_metadata(self): - self.assertEqual( - self.service._impl.set_metadata.return_value, - self.service.set_metadata("volume", sets=10, set_size=3)) - self.service._impl.set_metadata.assert_called_once_with( - "volume", set_size=3, sets=10) - - def test_delete_metadata(self): - keys = ["a", "b"] - self.service.delete_metadata("volume", keys=keys, deletes=10, - delete_size=3) - self.service._impl.delete_metadata.assert_called_once_with( - "volume", keys=keys, delete_size=3, deletes=10) - - def test_update_readonly_flag(self): - self.assertEqual( - self.service._impl.update_readonly_flag.return_value, - self.service.update_readonly_flag("volume", read_only=True)) - self.service._impl.update_readonly_flag.assert_called_once_with( - "volume", read_only=True) - - def test_upload_volume_to_image(self): - self.assertEqual( - self.service._impl.upload_volume_to_image.return_value, - self.service.upload_volume_to_image("volume", - force=False, - container_format="bare", - disk_format="raw")) - self.service._impl.upload_volume_to_image.assert_called_once_with( - "volume", container_format="bare", disk_format="raw", force=False) - - def test_create_qos(self): - specs = {"consumer": "both", - "write_iops_sec": "10", - "read_iops_sec": "1000"} - self.service._unify_qos = 
mock.MagicMock() - self.assertEqual( - self.service._unify_qos.return_value, - self.service.create_qos(specs) - ) - self.service._impl.create_qos.assert_called_once_with(specs) - self.service._unify_qos.assert_called_once_with( - self.service._impl.create_qos.return_value - ) - - def test_list_qos(self): - self.service._unify_qos = mock.MagicMock() - self.service._impl.list_qos.return_value = ["qos"] - self.assertEqual( - [self.service._unify_qos.return_value], - self.service.list_qos(True) - ) - self.service._impl.list_qos.assert_called_once_with(True) - self.service._unify_qos.assert_called_once_with("qos") - - def test_get_qos(self): - self.service._unify_qos = mock.MagicMock() - self.assertEqual( - self.service._unify_qos.return_value, - self.service.get_qos("qos")) - self.service._impl.get_qos.assert_called_once_with("qos") - self.service._unify_qos.assert_called_once_with( - self.service._impl.get_qos.return_value - ) - - def test_set_qos(self): - set_specs_args = {"test": "foo"} - self.service._unify_qos = mock.MagicMock() - qos = mock.MagicMock() - self.assertEqual( - self.service._unify_qos.return_value, - self.service.set_qos(qos, set_specs_args)) - self.service._impl.set_qos.assert_called_once_with(qos.id, - set_specs_args) - self.service._unify_qos.assert_called_once_with(qos) - - def test_qos_associate_type(self): - self.service._unify_qos = mock.MagicMock() - self.assertEqual( - self.service._unify_qos.return_value, - self.service.qos_associate_type("qos", "type_id")) - self.service._impl.qos_associate_type.assert_called_once_with( - "qos", "type_id") - self.service._unify_qos.assert_called_once_with("qos") - - def test_qos_disassociate_type(self): - self.service._unify_qos = mock.MagicMock() - self.assertEqual( - self.service._unify_qos.return_value, - self.service.qos_disassociate_type("qos", "type_id")) - self.service._impl.qos_disassociate_type.assert_called_once_with( - "qos", "type_id") - self.service._unify_qos.assert_called_once_with("qos") - - def test_delete_snapshot(self): - self.service.delete_snapshot("snapshot") - self.service._impl.delete_snapshot.assert_called_once_with("snapshot") - - def test_delete_backup(self): - self.service.delete_backup("backup") - self.service._impl.delete_backup.assert_called_once_with("backup") - - def test_list_backups(self): - self.service._unify_backup = mock.MagicMock() - self.service._impl.list_backups.return_value = ["backup"] - self.assertEqual([self.service._unify_backup.return_value], - self.service.list_backups(detailed=True)) - self.service._impl.list_backups.assert_called_once_with(detailed=True) - self.service._unify_backup.assert_called_once_with( - "backup") - - def test_list_transfers(self): - self.service._unify_transfer = mock.MagicMock() - self.service._impl.list_transfers.return_value = ["transfer"] - self.assertEqual( - [self.service._unify_transfer.return_value], - self.service.list_transfers(detailed=True, search_opts=None)) - self.service._impl.list_transfers.assert_called_once_with( - detailed=True, search_opts=None) - self.service._unify_transfer.assert_called_once_with( - "transfer") - - def test_get_volume_type(self): - self.assertEqual(self.service._impl.get_volume_type.return_value, - self.service.get_volume_type("volume_type")) - self.service._impl.get_volume_type.assert_called_once_with( - "volume_type") - - def test_delete_volume_type(self): - self.assertEqual(self.service._impl.delete_volume_type.return_value, - self.service.delete_volume_type("volume_type")) - 
self.service._impl.delete_volume_type.assert_called_once_with( - "volume_type") - - def test_set_volume_type_keys(self): - self.assertEqual(self.service._impl.set_volume_type_keys.return_value, - self.service.set_volume_type_keys( - "volume_type", metadata="metadata")) - self.service._impl.set_volume_type_keys.assert_called_once_with( - "volume_type", "metadata") - - def test_transfer_create(self): - self.service._unify_transfer = mock.MagicMock() - self.assertEqual(self.service._unify_transfer.return_value, - self.service.transfer_create(1)) - self.service._impl.transfer_create.assert_called_once_with( - 1, name=None) - self.service._unify_transfer.assert_called_once_with( - self.service._impl.transfer_create.return_value) - - def test_transfer_accept(self): - self.service._unify_transfer = mock.MagicMock() - self.assertEqual(self.service._unify_transfer.return_value, - self.service.transfer_accept(1, auth_key=2)) - self.service._impl.transfer_accept.assert_called_once_with( - 1, auth_key=2) - self.service._unify_transfer.assert_called_once_with( - self.service._impl.transfer_accept.return_value) - - def test_create_encryption_type(self): - self.service._unify_encryption_type = mock.MagicMock() - self.assertEqual( - self.service._unify_encryption_type.return_value, - self.service.create_encryption_type("type", specs=2)) - self.service._impl.create_encryption_type.assert_called_once_with( - "type", specs=2) - self.service._unify_encryption_type.assert_called_once_with( - self.service._impl.create_encryption_type.return_value) - - def test_get_encryption_type(self): - self.service._unify_encryption_type = mock.MagicMock() - self.assertEqual( - self.service._unify_encryption_type.return_value, - self.service.get_encryption_type("type")) - self.service._impl.get_encryption_type.assert_called_once_with( - "type") - self.service._unify_encryption_type.assert_called_once_with( - self.service._impl.get_encryption_type.return_value) - - def test_list_encryption_type(self): - self.service._unify_encryption_type = mock.MagicMock() - self.service._impl.list_encryption_type.return_value = ["encryption"] - self.assertEqual([self.service._unify_encryption_type.return_value], - self.service.list_encryption_type(search_opts=None)) - self.service._impl.list_encryption_type.assert_called_once_with( - search_opts=None) - self.service._unify_encryption_type.assert_called_once_with( - "encryption") - - def test_delete_encryption_type(self): - self.service.delete_encryption_type("type") - self.service._impl.delete_encryption_type.assert_called_once_with( - "type") - - def test_update_encryption_type(self): - self.service.update_encryption_type("type", specs=3) - self.service._impl.update_encryption_type.assert_called_once_with( - "type", specs=3) diff --git a/tests/unit/plugins/openstack/services/storage/test_cinder_v1.py b/tests/unit/plugins/openstack/services/storage/test_cinder_v1.py deleted file mode 100644 index b2a13027c5..0000000000 --- a/tests/unit/plugins/openstack/services/storage/test_cinder_v1.py +++ /dev/null @@ -1,355 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.common import cfg -from rally.plugins.openstack.services.storage import cinder_v1 -from tests.unit import fakes -from tests.unit import test - -BASE_PATH = "rally.plugins.openstack.services.storage" -CONF = cfg.CONF - - -class CinderV1ServiceTestCase(test.ScenarioTestCase): - def setUp(self): - super(CinderV1ServiceTestCase, self).setUp() - self.clients = mock.MagicMock() - self.cinder = self.clients.cinder.return_value - self.name_generator = mock.MagicMock() - self.service = cinder_v1.CinderV1Service( - self.clients, name_generator=self.name_generator) - - def atomic_actions(self): - return self.service._atomic_actions - - def test_create_volume(self): - self.service.generate_random_name = mock.MagicMock( - return_value="volume") - self.service._wait_available_volume = mock.MagicMock() - self.service._wait_available_volume.return_value = fakes.FakeVolume() - - return_volume = self.service.create_volume(1) - - kwargs = {"display_name": "volume", - "display_description": None, - "snapshot_id": None, - "source_volid": None, - "volume_type": None, - "user_id": None, - "project_id": None, - "availability_zone": None, - "metadata": None, - "imageRef": None} - self.cinder.volumes.create.assert_called_once_with(1, **kwargs) - self.service._wait_available_volume.assert_called_once_with( - self.cinder.volumes.create.return_value) - self.assertEqual(self.service._wait_available_volume.return_value, - return_volume) - self._test_atomic_action_timer(self.atomic_actions(), - "cinder_v1.create_volume") - - @mock.patch("%s.cinder_v1.random" % BASE_PATH) - def test_create_volume_with_size_range(self, mock_random): - mock_random.randint.return_value = 3 - self.service._wait_available_volume = mock.MagicMock() - self.service._wait_available_volume.return_value = fakes.FakeVolume() - - return_volume = self.service.create_volume( - size={"min": 1, "max": 5}, display_name="volume") - - kwargs = {"display_name": "volume", - "display_description": None, - "snapshot_id": None, - "source_volid": None, - "volume_type": None, - "user_id": None, - "project_id": None, - "availability_zone": None, - "metadata": None, - "imageRef": None} - self.cinder.volumes.create.assert_called_once_with( - 3, **kwargs) - self.service._wait_available_volume.assert_called_once_with( - self.cinder.volumes.create.return_value) - self.assertEqual(self.service._wait_available_volume.return_value, - return_volume) - - def test_update_volume(self): - return_value = {"volume": fakes.FakeVolume()} - self.cinder.volumes.update.return_value = return_value - - self.assertEqual(return_value["volume"], - self.service.update_volume(1)) - self.cinder.volumes.update.assert_called_once_with(1) - self._test_atomic_action_timer(self.atomic_actions(), - "cinder_v1.update_volume") - - def test_update_volume_with_name_description(self): - return_value = {"volume": fakes.FakeVolume()} - self.cinder.volumes.update.return_value = return_value - - return_volume = self.service.update_volume( - 1, display_name="volume", display_description="fake") - - self.cinder.volumes.update.assert_called_once_with( - 1, display_name="volume", display_description="fake") - self.assertEqual(return_value["volume"], return_volume) - self._test_atomic_action_timer(self.atomic_actions(), - "cinder_v1.update_volume") - - def test_list_types(self): - self.assertEqual(self.cinder.volume_types.list.return_value, - 
self.service.list_types(search_opts=None)) - - self.cinder.volume_types.list.assert_called_once_with(None) - self._test_atomic_action_timer(self.atomic_actions(), - "cinder_v1.list_types") - - def test_create_snapshot(self): - self.service._wait_available_volume = mock.MagicMock() - self.service._wait_available_volume.return_value = fakes.FakeVolume() - self.service.generate_random_name = mock.MagicMock( - return_value="snapshot") - - return_snapshot = self.service.create_snapshot(1) - - self.cinder.volume_snapshots.create.assert_called_once_with( - 1, display_name="snapshot", display_description=None, - force=False) - self.service._wait_available_volume.assert_called_once_with( - self.cinder.volume_snapshots.create.return_value) - self.assertEqual(self.service._wait_available_volume.return_value, - return_snapshot) - self._test_atomic_action_timer(self.atomic_actions(), - "cinder_v1.create_snapshot") - - def test_create_snapshot_with_name(self): - self.service._wait_available_volume = mock.MagicMock() - self.service._wait_available_volume.return_value = fakes.FakeVolume() - - return_snapshot = self.service.create_snapshot( - 1, display_name="snapshot") - - self.cinder.volume_snapshots.create.assert_called_once_with( - 1, display_name="snapshot", display_description=None, - force=False) - self.service._wait_available_volume.assert_called_once_with( - self.cinder.volume_snapshots.create.return_value) - self.assertEqual(self.service._wait_available_volume.return_value, - return_snapshot) - self._test_atomic_action_timer(self.atomic_actions(), - "cinder_v1.create_snapshot") - - def test_create_backup(self): - self.service._wait_available_volume = mock.MagicMock() - self.service._wait_available_volume.return_value = fakes.FakeVolume() - self.service.generate_random_name = mock.MagicMock( - return_value="backup") - - return_backup = self.service.create_backup(1) - - self.cinder.backups.create.assert_called_once_with( - 1, name="backup", description=None, container=None) - self.service._wait_available_volume.assert_called_once_with( - self.cinder.backups.create.return_value) - self.assertEqual(self.service._wait_available_volume.return_value, - return_backup) - self._test_atomic_action_timer(self.atomic_actions(), - "cinder_v1.create_backup") - - def test_create_backup_with_name(self): - self.service._wait_available_volume = mock.MagicMock() - self.service._wait_available_volume.return_value = fakes.FakeVolume() - - return_backup = self.service.create_backup(1, name="backup") - - self.cinder.backups.create.assert_called_once_with( - 1, name="backup", description=None, container=None) - self.service._wait_available_volume.assert_called_once_with( - self.cinder.backups.create.return_value) - self.assertEqual(self.service._wait_available_volume.return_value, - return_backup) - self._test_atomic_action_timer(self.atomic_actions(), - "cinder_v1.create_backup") - - def test_create_volume_type(self): - self.service.generate_random_name = mock.MagicMock( - return_value="volume_type") - - return_type = self.service.create_volume_type(name=None) - - self.cinder.volume_types.create.assert_called_once_with( - name="volume_type") - self.assertEqual(self.cinder.volume_types.create.return_value, - return_type) - self._test_atomic_action_timer(self.atomic_actions(), - "cinder_v1.create_volume_type") - - def test_create_volume_type_with_name(self): - return_type = self.service.create_volume_type(name="volume_type") - - self.cinder.volume_types.create.assert_called_once_with( - name="volume_type") - 
self.assertEqual(self.cinder.volume_types.create.return_value, - return_type) - self._test_atomic_action_timer(self.atomic_actions(), - "cinder_v1.create_volume_type") - - -class UnifiedCinderV1ServiceTestCase(test.TestCase): - def setUp(self): - super(UnifiedCinderV1ServiceTestCase, self).setUp() - self.clients = mock.MagicMock() - self.service = cinder_v1.UnifiedCinderV1Service(self.clients) - self.service._impl = mock.MagicMock() - - def test__unify_volume(self): - class SomeVolume(object): - id = 1 - display_name = "volume" - size = 1 - status = "st" - volume = self.service._unify_volume(SomeVolume()) - self.assertEqual(1, volume.id) - self.assertEqual("volume", volume.name) - self.assertEqual(1, volume.size) - self.assertEqual("st", volume.status) - - def test__unify_volume_with_dict(self): - some_volume = {"display_name": "volume", "id": 1, - "size": 1, "status": "st"} - volume = self.service._unify_volume(some_volume) - self.assertEqual(1, volume.id) - self.assertEqual("volume", volume.name) - self.assertEqual(1, volume.size) - self.assertEqual("st", volume.status) - - def test__unify_snapshot(self): - class SomeSnapshot(object): - id = 1 - display_name = "snapshot" - volume_id = "volume" - status = "st" - snapshot = self.service._unify_snapshot(SomeSnapshot()) - self.assertEqual(1, snapshot.id) - self.assertEqual("snapshot", snapshot.name) - self.assertEqual("volume", snapshot.volume_id) - self.assertEqual("st", snapshot.status) - - def test_create_volume(self): - self.service._unify_volume = mock.MagicMock() - self.assertEqual(self.service._unify_volume.return_value, - self.service.create_volume(1)) - self.service._impl.create_volume.assert_called_once_with( - 1, availability_zone=None, display_description=None, - display_name=None, imageRef=None, metadata=None, - project_id=None, snapshot_id=None, source_volid=None, - user_id=None, volume_type=None) - self.service._unify_volume.assert_called_once_with( - self.service._impl.create_volume.return_value) - - def test_list_volumes(self): - self.service._unify_volume = mock.MagicMock() - self.service._impl.list_volumes.return_value = ["vol"] - self.assertEqual([self.service._unify_volume.return_value], - self.service.list_volumes(detailed=True)) - self.service._impl.list_volumes.assert_called_once_with(detailed=True) - self.service._unify_volume.assert_called_once_with("vol") - - def test_get_volume(self): - self.service._unify_volume = mock.MagicMock() - self.assertEqual(self.service._unify_volume.return_value, - self.service.get_volume(1)) - self.service._impl.get_volume.assert_called_once_with(1) - self.service._unify_volume.assert_called_once_with( - self.service._impl.get_volume.return_value) - - def test_extend_volume(self): - self.service._unify_volume = mock.MagicMock() - self.assertEqual(self.service._unify_volume.return_value, - self.service.extend_volume("volume", new_size=1)) - self.service._impl.extend_volume.assert_called_once_with("volume", - new_size=1) - self.service._unify_volume.assert_called_once_with( - self.service._impl.extend_volume.return_value) - - def test_update_volume(self): - self.service._unify_volume = mock.MagicMock() - self.assertEqual( - self.service._unify_volume.return_value, - self.service.update_volume(1, name="volume", - description="fake")) - self.service._impl.update_volume.assert_called_once_with( - 1, display_description="fake", display_name="volume") - self.service._unify_volume.assert_called_once_with( - self.service._impl.update_volume.return_value) - - def test_list_types(self): - 
self.assertEqual( - self.service._impl.list_types.return_value, - self.service.list_types(search_opts=None)) - self.service._impl.list_types.assert_called_once_with( - search_opts=None) - - def test_create_snapshot(self): - self.service._unify_snapshot = mock.MagicMock() - self.assertEqual( - self.service._unify_snapshot.return_value, - self.service.create_snapshot(1, force=False, - name=None, - description=None)) - self.service._impl.create_snapshot.assert_called_once_with( - 1, force=False, display_name=None, display_description=None) - self.service._unify_snapshot.assert_called_once_with( - self.service._impl.create_snapshot.return_value) - - def test_list_snapshots(self): - self.service._unify_snapshot = mock.MagicMock() - self.service._impl.list_snapshots.return_value = ["snapshot"] - self.assertEqual([self.service._unify_snapshot.return_value], - self.service.list_snapshots(detailed=True)) - self.service._impl.list_snapshots.assert_called_once_with( - detailed=True) - self.service._unify_snapshot.assert_called_once_with( - "snapshot") - - def test_create_backup(self): - self.service._unify_backup = mock.MagicMock() - self.assertEqual( - self.service._unify_backup.return_value, - self.service.create_backup(1, container=None, - name=None, - description=None)) - self.service._impl.create_backup.assert_called_once_with( - 1, container=None, name=None, description=None) - self.service._unify_backup.assert_called_once_with( - self.service._impl.create_backup.return_value) - - def test_create_volume_type(self): - self.assertEqual( - self.service._impl.create_volume_type.return_value, - self.service.create_volume_type(name="type")) - self.service._impl.create_volume_type.assert_called_once_with( - name="type") - - def test_restore_backup(self): - self.service._unify_volume = mock.MagicMock() - self.assertEqual(self.service._unify_volume.return_value, - self.service.restore_backup(1, volume_id=1)) - self.service._impl.restore_backup.assert_called_once_with(1, - volume_id=1) - self.service._unify_volume.assert_called_once_with( - self.service._impl.restore_backup.return_value) diff --git a/tests/unit/plugins/openstack/services/storage/test_cinder_v2.py b/tests/unit/plugins/openstack/services/storage/test_cinder_v2.py deleted file mode 100644 index c3857f29d7..0000000000 --- a/tests/unit/plugins/openstack/services/storage/test_cinder_v2.py +++ /dev/null @@ -1,416 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
- -import mock - -from rally.common import cfg -from rally.plugins.openstack.services.storage import cinder_v2 -from tests.unit import fakes -from tests.unit import test - -BASE_PATH = "rally.plugins.openstack.services.storage" -CONF = cfg.CONF - - -class CinderV2ServiceTestCase(test.ScenarioTestCase): - def setUp(self): - super(CinderV2ServiceTestCase, self).setUp() - self.clients = mock.MagicMock() - self.cinder = self.clients.cinder.return_value - self.name_generator = mock.MagicMock() - self.service = cinder_v2.CinderV2Service( - self.clients, name_generator=self.name_generator) - - def atomic_actions(self): - return self.service._atomic_actions - - def test_create_volume(self): - self.service.generate_random_name = mock.MagicMock( - return_value="volume") - self.service._wait_available_volume = mock.MagicMock() - self.service._wait_available_volume.return_value = fakes.FakeVolume() - - return_volume = self.service.create_volume(1) - - kwargs = {"name": "volume", - "description": None, - "consistencygroup_id": None, - "snapshot_id": None, - "source_volid": None, - "volume_type": None, - "user_id": None, - "project_id": None, - "availability_zone": None, - "metadata": None, - "imageRef": None, - "scheduler_hints": None, - "source_replica": None, - "multiattach": False} - self.cinder.volumes.create.assert_called_once_with(1, **kwargs) - self.service._wait_available_volume.assert_called_once_with( - self.cinder.volumes.create.return_value) - self.assertEqual(self.service._wait_available_volume.return_value, - return_volume) - self._test_atomic_action_timer(self.atomic_actions(), - "cinder_v2.create_volume") - - @mock.patch("%s.cinder_v2.random" % BASE_PATH) - def test_create_volume_with_size_range(self, mock_random): - mock_random.randint.return_value = 3 - self.service._wait_available_volume = mock.MagicMock() - self.service._wait_available_volume.return_value = fakes.FakeVolume() - - return_volume = self.service.create_volume( - size={"min": 1, "max": 5}, name="volume") - - kwargs = {"name": "volume", - "description": None, - "consistencygroup_id": None, - "snapshot_id": None, - "source_volid": None, - "volume_type": None, - "user_id": None, - "project_id": None, - "availability_zone": None, - "metadata": None, - "imageRef": None, - "scheduler_hints": None, - "source_replica": None, - "multiattach": False} - self.cinder.volumes.create.assert_called_once_with( - 3, **kwargs) - self.service._wait_available_volume.assert_called_once_with( - self.cinder.volumes.create.return_value) - self.assertEqual(self.service._wait_available_volume.return_value, - return_volume) - - def test_update_volume(self): - return_value = {"volume": fakes.FakeVolume()} - self.cinder.volumes.update.return_value = return_value - - self.assertEqual(return_value["volume"], - self.service.update_volume(1)) - self.cinder.volumes.update.assert_called_once_with(1) - self._test_atomic_action_timer(self.atomic_actions(), - "cinder_v2.update_volume") - - def test_update_volume_with_name_description(self): - return_value = {"volume": fakes.FakeVolume()} - self.cinder.volumes.update.return_value = return_value - - return_volume = self.service.update_volume( - 1, name="volume", description="fake") - - self.cinder.volumes.update.assert_called_once_with( - 1, name="volume", description="fake") - self.assertEqual(return_value["volume"], return_volume) - self._test_atomic_action_timer(self.atomic_actions(), - "cinder_v2.update_volume") - - def test_list_types(self): - self.assertEqual(self.cinder.volume_types.list.return_value, - 
self.service.list_types(search_opts=None, - is_public=None)) - - self.cinder.volume_types.list.assert_called_once_with( - search_opts=None, is_public=None) - self._test_atomic_action_timer(self.atomic_actions(), - "cinder_v2.list_types") - - def test_create_snapshot(self): - self.service._wait_available_volume = mock.MagicMock() - self.service._wait_available_volume.return_value = fakes.FakeVolume() - self.service.generate_random_name = mock.MagicMock( - return_value="snapshot") - - return_snapshot = self.service.create_snapshot(1) - - self.cinder.volume_snapshots.create.assert_called_once_with( - 1, name="snapshot", description=None, force=False, - metadata=None) - self.service._wait_available_volume.assert_called_once_with( - self.cinder.volume_snapshots.create.return_value) - self.assertEqual(self.service._wait_available_volume.return_value, - return_snapshot) - self._test_atomic_action_timer(self.atomic_actions(), - "cinder_v2.create_snapshot") - - def test_create_snapshot_with_name(self): - self.service._wait_available_volume = mock.MagicMock() - self.service._wait_available_volume.return_value = fakes.FakeVolume() - - return_snapshot = self.service.create_snapshot(1, name="snapshot") - - self.cinder.volume_snapshots.create.assert_called_once_with( - 1, name="snapshot", description=None, force=False, - metadata=None) - self.service._wait_available_volume.assert_called_once_with( - self.cinder.volume_snapshots.create.return_value) - self.assertEqual(self.service._wait_available_volume.return_value, - return_snapshot) - self._test_atomic_action_timer(self.atomic_actions(), - "cinder_v2.create_snapshot") - - def test_create_backup(self): - self.service._wait_available_volume = mock.MagicMock() - self.service._wait_available_volume.return_value = fakes.FakeVolume() - self.service.generate_random_name = mock.MagicMock( - return_value="backup") - - return_backup = self.service.create_backup(1) - - self.cinder.backups.create.assert_called_once_with( - 1, name="backup", description=None, container=None, - incremental=False, force=False, snapshot_id=None) - self.service._wait_available_volume.assert_called_once_with( - self.cinder.backups.create.return_value) - self.assertEqual(self.service._wait_available_volume.return_value, - return_backup) - self._test_atomic_action_timer(self.atomic_actions(), - "cinder_v2.create_backup") - - def test_create_backup_with_name(self): - self.service._wait_available_volume = mock.MagicMock() - self.service._wait_available_volume.return_value = fakes.FakeVolume() - - return_backup = self.service.create_backup(1, name="backup") - - self.cinder.backups.create.assert_called_once_with( - 1, name="backup", description=None, container=None, - incremental=False, force=False, snapshot_id=None) - self.service._wait_available_volume.assert_called_once_with( - self.cinder.backups.create.return_value) - self.assertEqual(self.service._wait_available_volume.return_value, - return_backup) - self._test_atomic_action_timer(self.atomic_actions(), - "cinder_v2.create_backup") - - def test_create_volume_type(self): - self.service.generate_random_name = mock.MagicMock( - return_value="volume_type") - return_type = self.service.create_volume_type(name=None, - description=None, - is_public=True) - - self.cinder.volume_types.create.assert_called_once_with( - name="volume_type", description=None, is_public=True) - self.assertEqual(self.cinder.volume_types.create.return_value, - return_type) - self._test_atomic_action_timer(self.atomic_actions(), - "cinder_v2.create_volume_type") - - 
def test_create_volume_type_with_name_(self): - return_type = self.service.create_volume_type(name="type", - description=None, - is_public=True) - - self.cinder.volume_types.create.assert_called_once_with( - name="type", description=None, is_public=True) - self.assertEqual(self.cinder.volume_types.create.return_value, - return_type) - self._test_atomic_action_timer(self.atomic_actions(), - "cinder_v2.create_volume_type") - - def test_update_volume_type(self): - volume_type = mock.Mock() - name = "random_name" - self.service.generate_random_name = mock.MagicMock( - return_value=name) - description = "test update" - - result = self.service.update_volume_type(volume_type, - description=description, - name=None, - is_public=None) - self.assertEqual( - self.cinder.volume_types.update.return_value, - result) - self._test_atomic_action_timer(self.atomic_actions(), - "cinder_v2.update_volume_type") - - def test_add_type_access(self): - volume_type = mock.Mock() - project = mock.Mock() - type_access = self.service.add_type_access(volume_type, - project=project) - add_project_access = self.cinder.volume_type_access.add_project_access - add_project_access.assert_called_once_with( - volume_type, project) - self.assertEqual(add_project_access.return_value, - type_access) - self._test_atomic_action_timer(self.atomic_actions(), - "cinder_v2.add_type_access") - - def test_list_type_access(self): - volume_type = mock.Mock() - type_access = self.service.list_type_access(volume_type) - self.cinder.volume_type_access.list.assert_called_once_with( - volume_type) - self.assertEqual(self.cinder.volume_type_access.list.return_value, - type_access) - self._test_atomic_action_timer(self.atomic_actions(), - "cinder_v2.list_type_access") - - -class UnifiedCinderV2ServiceTestCase(test.TestCase): - def setUp(self): - super(UnifiedCinderV2ServiceTestCase, self).setUp() - self.clients = mock.MagicMock() - self.service = cinder_v2.UnifiedCinderV2Service(self.clients) - self.service._impl = mock.MagicMock() - - def test__unify_volume(self): - class SomeVolume(object): - id = 1 - name = "volume" - size = 1 - status = "st" - volume = self.service._unify_volume(SomeVolume()) - self.assertEqual(1, volume.id) - self.assertEqual("volume", volume.name) - self.assertEqual(1, volume.size) - self.assertEqual("st", volume.status) - - def test__unify_volume_with_dict(self): - some_volume = {"name": "volume", "id": 1, "size": 1, "status": "st"} - volume = self.service._unify_volume(some_volume) - self.assertEqual(1, volume.id) - self.assertEqual("volume", volume.name) - self.assertEqual(1, volume.size) - self.assertEqual("st", volume.status) - - def test__unify_snapshot(self): - class SomeSnapshot(object): - id = 1 - name = "snapshot" - volume_id = "volume" - status = "st" - snapshot = self.service._unify_snapshot(SomeSnapshot()) - self.assertEqual(1, snapshot.id) - self.assertEqual("snapshot", snapshot.name) - self.assertEqual("volume", snapshot.volume_id) - self.assertEqual("st", snapshot.status) - - def test_create_volume(self): - self.service._unify_volume = mock.MagicMock() - self.assertEqual(self.service._unify_volume.return_value, - self.service.create_volume(1)) - self.service._impl.create_volume.assert_called_once_with( - 1, availability_zone=None, consistencygroup_id=None, - description=None, imageRef=None, - metadata=None, multiattach=False, name=None, project_id=None, - scheduler_hints=None, snapshot_id=None, source_replica=None, - source_volid=None, user_id=None, volume_type=None) - 
self.service._unify_volume.assert_called_once_with( - self.service._impl.create_volume.return_value) - - def test_list_volumes(self): - self.service._unify_volume = mock.MagicMock() - self.service._impl.list_volumes.return_value = ["vol"] - self.assertEqual([self.service._unify_volume.return_value], - self.service.list_volumes(detailed=True)) - self.service._impl.list_volumes.assert_called_once_with(detailed=True) - self.service._unify_volume.assert_called_once_with("vol") - - def test_get_volume(self): - self.service._unify_volume = mock.MagicMock() - self.assertEqual(self.service._unify_volume.return_value, - self.service.get_volume(1)) - self.service._impl.get_volume.assert_called_once_with(1) - self.service._unify_volume.assert_called_once_with( - self.service._impl.get_volume.return_value) - - def test_extend_volume(self): - self.service._unify_volume = mock.MagicMock() - self.assertEqual(self.service._unify_volume.return_value, - self.service.extend_volume("volume", new_size=1)) - self.service._impl.extend_volume.assert_called_once_with("volume", - new_size=1) - self.service._unify_volume.assert_called_once_with( - self.service._impl.extend_volume.return_value) - - def test_update_volume(self): - self.service._unify_volume = mock.MagicMock() - self.assertEqual( - self.service._unify_volume.return_value, - self.service.update_volume(1, name="volume", - description="fake")) - self.service._impl.update_volume.assert_called_once_with( - 1, description="fake", name="volume") - self.service._unify_volume.assert_called_once_with( - self.service._impl.update_volume.return_value) - - def test_list_types(self): - self.assertEqual( - self.service._impl.list_types.return_value, - self.service.list_types(search_opts=None, is_public=True)) - self.service._impl.list_types.assert_called_once_with( - search_opts=None, is_public=True) - - def test_create_snapshot(self): - self.service._unify_snapshot = mock.MagicMock() - self.assertEqual( - self.service._unify_snapshot.return_value, - self.service.create_snapshot(1, force=False, - name=None, - description=None, - metadata=None)) - self.service._impl.create_snapshot.assert_called_once_with( - 1, force=False, name=None, description=None, metadata=None) - self.service._unify_snapshot.assert_called_once_with( - self.service._impl.create_snapshot.return_value) - - def test_list_snapshots(self): - self.service._unify_snapshot = mock.MagicMock() - self.service._impl.list_snapshots.return_value = ["snapshot"] - self.assertEqual([self.service._unify_snapshot.return_value], - self.service.list_snapshots(detailed=True)) - self.service._impl.list_snapshots.assert_called_once_with( - detailed=True) - self.service._unify_snapshot.assert_called_once_with( - "snapshot") - - def test_create_backup(self): - self.service._unify_backup = mock.MagicMock() - self.assertEqual( - self.service._unify_backup.return_value, - self.service.create_backup(1, container=None, - name=None, - description=None, - incremental=False, - force=False, - snapshot_id=None)) - self.service._impl.create_backup.assert_called_once_with( - 1, container=None, name=None, description=None, - incremental=False, force=False, snapshot_id=None) - self.service._unify_backup.assert_called_once_with( - self.service._impl.create_backup.return_value) - - def test_create_volume_type(self): - self.assertEqual( - self.service._impl.create_volume_type.return_value, - self.service.create_volume_type(name="type", - description="desp", - is_public=True)) - self.service._impl.create_volume_type.assert_called_once_with( - name="type",
description="desp", is_public=True) - - def test_restore_backup(self): - self.service._unify_volume = mock.MagicMock() - self.assertEqual(self.service._unify_volume.return_value, - self.service.restore_backup(1, volume_id=1)) - self.service._impl.restore_backup.assert_called_once_with(1, - volume_id=1) - self.service._unify_volume.assert_called_once_with( - self.service._impl.restore_backup.return_value) diff --git a/tests/unit/plugins/openstack/test_credential.py b/tests/unit/plugins/openstack/test_credential.py deleted file mode 100644 index 01c6433a30..0000000000 --- a/tests/unit/plugins/openstack/test_credential.py +++ /dev/null @@ -1,64 +0,0 @@ -# Copyright 2017: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.plugins.openstack import credential -from tests.unit import test - - -class OpenStackCredentialTestCase(test.TestCase): - - def setUp(self): - super(OpenStackCredentialTestCase, self).setUp() - self.credential = credential.OpenStackCredential( - "foo_url", "foo_user", "foo_password", - tenant_name="foo_tenant") - - def test_to_dict(self): - self.assertEqual({"auth_url": "foo_url", - "username": "foo_user", - "password": "foo_password", - "tenant_name": "foo_tenant", - "region_name": None, - "domain_name": None, - "permission": None, - "endpoint": None, - "endpoint_type": None, - "https_insecure": False, - "https_cacert": None, - "project_domain_name": None, - "user_domain_name": None, - "profiler_hmac_key": None, - "profiler_conn_str": None}, - self.credential.to_dict()) - - @mock.patch("rally.plugins.openstack.osclients.Clients") - def test_list_services(self, mock_clients): - mock_clients.return_value.services.return_value = {"compute": "nova", - "volume": "cinder"} - result = self.credential.list_services() - mock_clients.assert_called_once_with( - self.credential, api_info=None, cache={}) - mock_clients.return_value.services.assert_called_once_with() - self.assertEqual([{"name": "cinder", "type": "volume"}, - {"name": "nova", "type": "compute"}], result) - - @mock.patch("rally.plugins.openstack.osclients.Clients") - def test_clients(self, mock_clients): - clients = self.credential.clients(api_info="fake_info") - mock_clients.assert_called_once_with( - self.credential, api_info="fake_info", cache={}) - self.assertIs(mock_clients.return_value, clients) diff --git a/tests/unit/plugins/openstack/test_osclients.py b/tests/unit/plugins/openstack/test_osclients.py deleted file mode 100644 index 1c0ad545e8..0000000000 --- a/tests/unit/plugins/openstack/test_osclients.py +++ /dev/null @@ -1,946 +0,0 @@ -# Copyright 2013: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import mock - -from rally.common import cfg -from rally import consts -from rally import exceptions -from rally import osclients as deprecated_osclients # noqa -from rally.plugins.openstack import credential as oscredential -from rally.plugins.openstack import osclients -from tests.unit import fakes -from tests.unit import test - - -PATH = "rally.plugins.openstack.osclients" - - -@osclients.configure("dummy", supported_versions=("0.1", "1"), - default_service_type="bar") -class DummyClient(osclients.OSClient): - def create_client(self, *args, **kwargs): - pass - - -class OSClientTestCaseUtils(object): - - def set_up_keystone_mocks(self): - self.ksc_module = mock.MagicMock(__version__="2.0.0") - self.ksc_client = mock.MagicMock() - self.ksa_identity_plugin = mock.MagicMock() - self.ksa_password = mock.MagicMock( - return_value=self.ksa_identity_plugin) - self.ksa_identity = mock.MagicMock(Password=self.ksa_password) - - self.ksa_auth = mock.MagicMock() - self.ksa_session = mock.MagicMock() - self.patcher = mock.patch.dict("sys.modules", - {"keystoneclient": self.ksc_module, - "keystoneauth1": self.ksa_auth}) - self.patcher.start() - self.addCleanup(self.patcher.stop) - self.ksc_module.client = self.ksc_client - self.ksa_auth.identity = self.ksa_identity - self.ksa_auth.session = self.ksa_session - - def make_auth_args(self): - auth_kwargs = { - "auth_url": "http://auth_url/", "username": "user", - "password": "password", "tenant_name": "tenant", - "domain_name": "domain", "project_name": "project_name", - "project_domain_name": "project_domain_name", - "user_domain_name": "user_domain_name", - } - kwargs = {"https_insecure": False, "https_cacert": None} - kwargs.update(auth_kwargs) - return auth_kwargs, kwargs - - -@ddt.ddt -class OSClientTestCase(test.TestCase, OSClientTestCaseUtils): - - @ddt.data((0.1, True), (1, True), ("0.1", True), ("1", True), - (0.2, False), ("foo", False)) - @ddt.unpack - def test_validate_version(self, version, valid): - if valid: - DummyClient.validate_version(version) - else: - self.assertRaises(exceptions.ValidationError, - DummyClient.validate_version, version) - - def test_choose_service_type(self): - default_service_type = "default_service_type" - - @osclients.configure("test_choose_service_type", - default_service_type=default_service_type) - class FakeClient(osclients.OSClient): - create_client = mock.MagicMock() - - fake_client = FakeClient(mock.MagicMock(), {}, {}) - self.assertEqual(default_service_type, - fake_client.choose_service_type()) - self.assertEqual("foo", - fake_client.choose_service_type("foo")) - - @mock.patch("%s.Keystone.service_catalog" % PATH) - @ddt.data( - {"endpoint_type": None, "service_type": None, "region_name": None}, - {"endpoint_type": "et", "service_type": "st", "region_name": "rn"} - ) - @ddt.unpack - def test__get_endpoint(self, mock_keystone_service_catalog, endpoint_type, - service_type, region_name): - credential = oscredential.OpenStackCredential( - "http://auth_url/v2.0", "user", "pass", - endpoint_type=endpoint_type, - region_name=region_name) - mock_choose_service_type = mock.MagicMock() - 
osclient = osclients.OSClient(credential, {}, mock.MagicMock()) - osclient.choose_service_type = mock_choose_service_type - mock_url_for = mock_keystone_service_catalog.url_for - self.assertEqual(mock_url_for.return_value, - osclient._get_endpoint(service_type)) - call_args = { - "service_type": mock_choose_service_type.return_value, - "region_name": region_name} - if endpoint_type: - call_args["interface"] = endpoint_type - mock_url_for.assert_called_once_with(**call_args) - mock_choose_service_type.assert_called_once_with(service_type) - - @mock.patch("%s.Keystone.get_session" % PATH) - def test__get_session(self, mock_keystone_get_session): - osclient = osclients.OSClient(None, None, None) - auth_url = "auth_url" - version = "version" - import warnings - with mock.patch.object(warnings, "warn") as mock_warn: - self.assertEqual(mock_keystone_get_session.return_value, - osclient._get_session(auth_url, version)) - self.assertFalse(mock_warn.called) - mock_keystone_get_session.assert_called_once_with(version) - - -class CachedTestCase(test.TestCase): - - def test_cached(self): - clients = osclients.Clients(mock.MagicMock()) - client_name = "CachedTestCase.test_cached" - fake_client = osclients.configure(client_name)(osclients.OSClient)( - clients.credential, clients.api_info, clients.cache) - fake_client.create_client = mock.MagicMock() - - self.assertEqual({}, clients.cache) - fake_client() - self.assertEqual( - {client_name: fake_client.create_client.return_value}, - clients.cache) - fake_client.create_client.assert_called_once_with() - fake_client() - fake_client.create_client.assert_called_once_with() - fake_client("2") - self.assertEqual( - {client_name: fake_client.create_client.return_value, - "%s('2',)" % client_name: fake_client.create_client.return_value}, - clients.cache) - clients.clear() - self.assertEqual({}, clients.cache) - - -@ddt.ddt -class TestCreateKeystoneClient(test.TestCase, OSClientTestCaseUtils): - - def setUp(self): - super(TestCreateKeystoneClient, self).setUp() - self.credential = oscredential.OpenStackCredential( - "http://auth_url/v2.0", "user", "pass", "tenant") - - def test_create_client(self): - # NOTE(bigjools): This is a very poor testing strategy as it - # tightly couples the test implementation to the tested - # function's implementation. Ideally, we'd use a fake keystone - # but all that's happening here is that it's checking the right - # parameters were passed to the various parts that create a - # client. Hopefully one day we'll get a real fake from the - # keystone guys. - self.set_up_keystone_mocks() - keystone = osclients.Keystone(self.credential, {}, mock.MagicMock()) - keystone.get_session = mock.Mock( - return_value=(self.ksa_session, self.ksa_identity_plugin,)) - client = keystone.create_client(version=3) - - kwargs_session = self.credential.to_dict() - kwargs_session.update({ - "auth_url": "http://auth_url/", - "session": self.ksa_session, - "timeout": 180.0}) - keystone.get_session.assert_called_with() - called_with = self.ksc_client.Client.call_args_list[0][1] - self.assertEqual( - {"session": self.ksa_session, "timeout": 180.0, "version": "3"}, - called_with) - self.ksc_client.Client.assert_called_once_with( - session=self.ksa_session, timeout=180.0, version="3") - self.assertIs(client, self.ksc_client.Client()) - - def test_create_client_removes_url_path_if_version_specified(self): - # If specifying a version on the client creation call, ensure - # the auth_url is versionless and the version required is passed - # into the Client() call. 
- self.set_up_keystone_mocks() - auth_kwargs, all_kwargs = self.make_auth_args() - keystone = osclients.Keystone( - self.credential, {}, mock.MagicMock()) - keystone.get_session = mock.Mock( - return_value=(self.ksa_session, self.ksa_identity_plugin,)) - client = keystone.create_client(version="3") - - self.assertIs(client, self.ksc_client.Client()) - called_with = self.ksc_client.Client.call_args_list[0][1] - self.assertEqual( - {"session": self.ksa_session, "timeout": 180.0, "version": "3"}, - called_with) - - @ddt.data({"original": "https://example.com/identity/foo/v3", - "cropped": "https://example.com/identity/foo"}, - {"original": "https://example.com/identity/foo/v3/", - "cropped": "https://example.com/identity/foo"}, - {"original": "https://example.com/identity/foo/v2.0", - "cropped": "https://example.com/identity/foo"}, - {"original": "https://example.com/identity/foo/v2.0/", - "cropped": "https://example.com/identity/foo"}, - {"original": "https://example.com/identity/foo", - "cropped": "https://example.com/identity/foo"}) - @ddt.unpack - def test__remove_url_version(self, original, cropped): - credential = oscredential.OpenStackCredential( - original, "user", "pass", "tenant") - keystone = osclients.Keystone(credential, {}, {}) - self.assertEqual(cropped, keystone._remove_url_version()) - - @ddt.data("http://auth_url/v2.0", "http://auth_url/v3", - "http://auth_url/", "auth_url") - def test_keystone_get_session(self, auth_url): - credential = oscredential.OpenStackCredential( - auth_url, "user", "pass", "tenant") - self.set_up_keystone_mocks() - keystone = osclients.Keystone(credential, {}, {}) - - version_data = mock.Mock(return_value=[{"version": (1, 0)}]) - self.ksa_auth.discover.Discover.return_value = ( - mock.Mock(version_data=version_data)) - - self.assertEqual((self.ksa_session.Session.return_value, - self.ksa_identity_plugin), - keystone.get_session()) - if auth_url.endswith("v2.0"): - self.ksa_password.assert_called_once_with( - auth_url=auth_url, password="pass", - tenant_name="tenant", username="user") - else: - self.ksa_password.assert_called_once_with( - auth_url=auth_url, password="pass", - tenant_name="tenant", username="user", - domain_name=None, project_domain_name=None, - user_domain_name=None) - self.ksa_session.Session.assert_has_calls( - [mock.call(timeout=180.0, verify=True), - mock.call(auth=self.ksa_identity_plugin, timeout=180.0, - verify=True)]) - - def test_keystone_property(self): - keystone = osclients.Keystone(None, None, None) - self.assertRaises(exceptions.RallyException, lambda: keystone.keystone) - - @mock.patch("%s.Keystone.get_session" % PATH) - def test_auth_ref(self, mock_keystone_get_session): - session = mock.MagicMock() - auth_plugin = mock.MagicMock() - mock_keystone_get_session.return_value = (session, auth_plugin) - cache = {} - keystone = osclients.Keystone(None, None, cache) - - self.assertEqual(auth_plugin.get_access.return_value, - keystone.auth_ref) - self.assertEqual(auth_plugin.get_access.return_value, - cache["keystone_auth_ref"]) - - # check that auth_ref was cached. 
- keystone.auth_ref - mock_keystone_get_session.assert_called_once_with() - - @mock.patch("keystoneauth1.identity.base.BaseIdentityPlugin.get_access") - def test_auth_ref_fails(self, mock_get_access): - mock_get_access.side_effect = Exception - keystone = osclients.Keystone(self.credential, {}, {}) - - try: - keystone.auth_ref - except exceptions.AuthenticationFailed: - pass - else: - self.fail("keystone.auth_ref didn't raise" - " exceptions.AuthenticationFailed") - - @mock.patch("%s.LOG.exception" % PATH) - @mock.patch("%s.logging.is_debug" % PATH) - @mock.patch("keystoneauth1.identity.base.BaseIdentityPlugin.get_access") - def test_auth_ref_debug(self, mock_get_access, - mock_is_debug, mock_log_exception): - mock_is_debug.return_value = True - mock_get_access.side_effect = Exception - keystone = osclients.Keystone(self.credential, {}, {}) - - try: - keystone.auth_ref - except exceptions.AuthenticationFailed: - pass - else: - self.fail("keystone.auth_ref didn't raise" - " exceptions.AuthenticationFailed") - - mock_log_exception.assert_called_once_with(mock.ANY) - mock_is_debug.assert_called_once_with() - - -@ddt.ddt -class OSClientsTestCase(test.TestCase): - - def setUp(self): - super(OSClientsTestCase, self).setUp() - self.credential = oscredential.OpenStackCredential( - "http://auth_url/v2.0", "user", "pass", "tenant") - self.clients = osclients.Clients(self.credential, {}) - - self.fake_keystone = fakes.FakeKeystoneClient() - - keystone_patcher = mock.patch( - "%s.Keystone.create_client" % PATH, - return_value=self.fake_keystone) - self.mock_create_keystone_client = keystone_patcher.start() - - self.auth_ref_patcher = mock.patch("%s.Keystone.auth_ref" % PATH) - self.auth_ref = self.auth_ref_patcher.start() - - self.service_catalog = self.auth_ref.service_catalog - self.service_catalog.url_for = mock.MagicMock() - - def test_create_from_env(self): - with mock.patch.dict("os.environ", - {"OS_AUTH_URL": "foo_auth_url", - "OS_USERNAME": "foo_username", - "OS_PASSWORD": "foo_password", - "OS_TENANT_NAME": "foo_tenant_name", - "OS_REGION_NAME": "foo_region_name"}): - clients = osclients.Clients.create_from_env() - - self.assertEqual("foo_auth_url", clients.credential.auth_url) - self.assertEqual("foo_username", clients.credential.username) - self.assertEqual("foo_password", clients.credential.password) - self.assertEqual("foo_tenant_name", clients.credential.tenant_name) - self.assertEqual("foo_region_name", clients.credential.region_name) - - def test_keystone(self): - self.assertNotIn("keystone", self.clients.cache) - client = self.clients.keystone() - self.assertEqual(self.fake_keystone, client) - credential = {"timeout": cfg.CONF.openstack_client_http_timeout, - "insecure": False, "cacert": None} - kwargs = self.credential.to_dict() - kwargs.update(credential) - self.mock_create_keystone_client.assert_called_once_with() - self.assertEqual(self.fake_keystone, self.clients.cache["keystone"]) - - def test_keystone_versions(self): - self.clients.keystone.validate_version(2) - self.clients.keystone.validate_version(3) - - def test_keysonte_service_type(self): - self.assertRaises(exceptions.RallyException, - self.clients.keystone.is_service_type_configurable) - - def test_verified_keystone(self): - self.auth_ref.role_names = ["admin"] - self.assertEqual(self.mock_create_keystone_client.return_value, - self.clients.verified_keystone()) - - def test_verified_keystone_user_not_admin(self): - self.auth_ref.role_names = ["notadmin"] - self.assertRaises(exceptions.InvalidAdminException, - 
self.clients.verified_keystone) - - @mock.patch("%s.Keystone.get_session" % PATH) - def test_verified_keystone_authentication_fails(self, - mock_keystone_get_session): - self.auth_ref_patcher.stop() - mock_keystone_get_session.side_effect = ( - exceptions.AuthenticationFailed( - username=self.credential.username, - project=self.credential.tenant_name, - url=self.credential.auth_url, - etype=KeyError, - error="oops") - ) - self.assertRaises(exceptions.AuthenticationFailed, - self.clients.verified_keystone) - - @mock.patch("%s.Nova._get_endpoint" % PATH) - def test_nova(self, mock_nova__get_endpoint): - fake_nova = fakes.FakeNovaClient() - mock_nova__get_endpoint.return_value = "http://fake.to:2/fake" - mock_nova = mock.MagicMock() - mock_nova.client.Client.return_value = fake_nova - mock_keystoneauth1 = mock.MagicMock() - self.assertNotIn("nova", self.clients.cache) - with mock.patch.dict("sys.modules", - {"novaclient": mock_nova, - "keystoneauth1": mock_keystoneauth1}): - mock_keystoneauth1.discover.Discover.return_value = ( - mock.Mock(version_data=mock.Mock(return_value=[ - {"version": (2, 0)}])) - ) - client = self.clients.nova() - self.assertEqual(fake_nova, client) - kw = { - "version": "2", - "session": mock_keystoneauth1.session.Session(), - "endpoint_override": mock_nova__get_endpoint.return_value} - mock_nova.client.Client.assert_called_once_with(**kw) - self.assertEqual(fake_nova, self.clients.cache["nova"]) - - def test_nova_validate_version(self): - osclients.Nova.validate_version("2") - self.assertRaises(exceptions.RallyException, - osclients.Nova.validate_version, "foo") - - def test_nova_service_type(self): - self.clients.nova.is_service_type_configurable() - - @mock.patch("%s.Neutron._get_endpoint" % PATH) - def test_neutron(self, mock_neutron__get_endpoint): - fake_neutron = fakes.FakeNeutronClient() - mock_neutron__get_endpoint.return_value = "http://fake.to:2/fake" - mock_neutron = mock.MagicMock() - mock_keystoneauth1 = mock.MagicMock() - mock_neutron.client.Client.return_value = fake_neutron - self.assertNotIn("neutron", self.clients.cache) - with mock.patch.dict("sys.modules", - {"neutronclient.neutron": mock_neutron, - "keystoneauth1": mock_keystoneauth1}): - client = self.clients.neutron() - self.assertEqual(fake_neutron, client) - kw = { - "session": mock_keystoneauth1.session.Session(), - "endpoint_override": mock_neutron__get_endpoint.return_value} - mock_neutron.client.Client.assert_called_once_with("2.0", **kw) - self.assertEqual(fake_neutron, self.clients.cache["neutron"]) - - @mock.patch("%s.Neutron._get_endpoint" % PATH) - def test_neutron_endpoint_type(self, mock_neutron__get_endpoint): - fake_neutron = fakes.FakeNeutronClient() - mock_neutron__get_endpoint.return_value = "http://fake.to:2/fake" - mock_neutron = mock.MagicMock() - mock_keystoneauth1 = mock.MagicMock() - mock_neutron.client.Client.return_value = fake_neutron - self.assertNotIn("neutron", self.clients.cache) - self.credential["endpoint_type"] = "internal" - with mock.patch.dict("sys.modules", - {"neutronclient.neutron": mock_neutron, - "keystoneauth1": mock_keystoneauth1}): - client = self.clients.neutron() - self.assertEqual(fake_neutron, client) - kw = { - "session": mock_keystoneauth1.session.Session(), - "endpoint_override": mock_neutron__get_endpoint.return_value, - "endpoint_type": "internal"} - mock_neutron.client.Client.assert_called_once_with("2.0", **kw) - self.assertEqual(fake_neutron, self.clients.cache["neutron"]) - - @mock.patch("%s.Heat._get_endpoint" % PATH) - def 
test_heat(self, mock_heat__get_endpoint): - fake_heat = fakes.FakeHeatClient() - mock_heat__get_endpoint.return_value = "http://fake.to:2/fake" - mock_heat = mock.MagicMock() - mock_keystoneauth1 = mock.MagicMock() - mock_heat.client.Client.return_value = fake_heat - self.assertNotIn("heat", self.clients.cache) - with mock.patch.dict("sys.modules", - {"heatclient": mock_heat, - "keystoneauth1": mock_keystoneauth1}): - client = self.clients.heat() - self.assertEqual(fake_heat, client) - kw = { - "session": mock_keystoneauth1.session.Session(), - "endpoint_override": mock_heat__get_endpoint.return_value} - mock_heat.client.Client.assert_called_once_with("1", **kw) - self.assertEqual(fake_heat, self.clients.cache["heat"]) - - @mock.patch("%s.Heat._get_endpoint" % PATH) - def test_heat_endpoint_type_interface(self, mock_heat__get_endpoint): - fake_heat = fakes.FakeHeatClient() - mock_heat__get_endpoint.return_value = "http://fake.to:2/fake" - mock_heat = mock.MagicMock() - mock_keystoneauth1 = mock.MagicMock() - mock_heat.client.Client.return_value = fake_heat - self.assertNotIn("heat", self.clients.cache) - self.credential["endpoint_type"] = "internal" - with mock.patch.dict("sys.modules", - {"heatclient": mock_heat, - "keystoneauth1": mock_keystoneauth1}): - client = self.clients.heat() - self.assertEqual(fake_heat, client) - kw = { - "session": mock_keystoneauth1.session.Session(), - "endpoint_override": mock_heat__get_endpoint.return_value, - "interface": "internal"} - mock_heat.client.Client.assert_called_once_with("1", **kw) - self.assertEqual(fake_heat, self.clients.cache["heat"]) - - @mock.patch("%s.Glance._get_endpoint" % PATH) - def test_glance(self, mock_glance__get_endpoint): - fake_glance = fakes.FakeGlanceClient() - mock_glance = mock.MagicMock() - mock_glance__get_endpoint.return_value = "http://fake.to:2/fake" - mock_keystoneauth1 = mock.MagicMock() - mock_glance.Client = mock.MagicMock(return_value=fake_glance) - with mock.patch.dict("sys.modules", - {"glanceclient": mock_glance, - "keystoneauth1": mock_keystoneauth1}): - self.assertNotIn("glance", self.clients.cache) - client = self.clients.glance() - self.assertEqual(fake_glance, client) - kw = { - "version": "2", - "session": mock_keystoneauth1.session.Session(), - "endpoint_override": mock_glance__get_endpoint.return_value} - mock_glance.Client.assert_called_once_with(**kw) - self.assertEqual(fake_glance, self.clients.cache["glance"]) - - @mock.patch("%s.Cinder._get_endpoint" % PATH) - def test_cinder(self, mock_cinder__get_endpoint): - fake_cinder = mock.MagicMock(client=fakes.FakeCinderClient()) - mock_cinder = mock.MagicMock() - mock_cinder.client.Client.return_value = fake_cinder - mock_cinder__get_endpoint.return_value = "http://fake.to:2/fake" - mock_keystoneauth1 = mock.MagicMock() - self.assertNotIn("cinder", self.clients.cache) - with mock.patch.dict("sys.modules", - {"cinderclient": mock_cinder, - "keystoneauth1": mock_keystoneauth1}): - client = self.clients.cinder() - self.assertEqual(fake_cinder, client) - kw = { - "session": mock_keystoneauth1.session.Session(), - "endpoint_override": mock_cinder__get_endpoint.return_value} - mock_cinder.client.Client.assert_called_once_with( - "2", **kw) - self.assertEqual(fake_cinder, self.clients.cache["cinder"]) - - @mock.patch("%s.Manila._get_endpoint" % PATH) - def test_manila(self, mock_manila__get_endpoint): - mock_manila = mock.MagicMock() - mock_manila__get_endpoint.return_value = "http://fake.to:2/fake" - mock_keystoneauth1 = mock.MagicMock() - 
self.assertNotIn("manila", self.clients.cache) - with mock.patch.dict("sys.modules", - {"manilaclient": mock_manila, - "keystoneauth1": mock_keystoneauth1}): - client = self.clients.manila() - self.assertEqual(mock_manila.client.Client.return_value, client) - kw = { - "session": mock_keystoneauth1.session.Session(), - "service_catalog_url": mock_manila__get_endpoint.return_value - } - mock_manila.client.Client.assert_called_once_with("1", **kw) - self.assertEqual( - mock_manila.client.Client.return_value, - self.clients.cache["manila"]) - - def test_manila_validate_version(self): - osclients.Manila.validate_version("2.0") - osclients.Manila.validate_version("2.32") - self.assertRaises(exceptions.RallyException, - osclients.Manila.validate_version, "foo") - - @mock.patch("%s.Ceilometer._get_endpoint" % PATH) - def test_ceilometer(self, mock_ceilometer__get_endpoint): - fake_ceilometer = fakes.FakeCeilometerClient() - mock_ceilometer = mock.MagicMock() - mock_ceilometer__get_endpoint.return_value = "http://fake.to:2/fake" - mock_keystoneauth1 = mock.MagicMock() - mock_ceilometer.client.get_client = mock.MagicMock( - return_value=fake_ceilometer) - self.assertNotIn("ceilometer", self.clients.cache) - with mock.patch.dict("sys.modules", - {"ceilometerclient": mock_ceilometer, - "keystoneauth1": mock_keystoneauth1}): - client = self.clients.ceilometer() - self.assertEqual(fake_ceilometer, client) - kw = { - "session": mock_keystoneauth1.session.Session(), - "endpoint_override": mock_ceilometer__get_endpoint.return_value - } - mock_ceilometer.client.get_client.assert_called_once_with("2", - **kw) - self.assertEqual(fake_ceilometer, - self.clients.cache["ceilometer"]) - - def test_gnocchi(self): - fake_gnocchi = fakes.FakeGnocchiClient() - mock_gnocchi = mock.MagicMock() - mock_gnocchi.client.Client.return_value = fake_gnocchi - mock_keystoneauth1 = mock.MagicMock() - self.assertNotIn("gnocchi", self.clients.cache) - with mock.patch.dict("sys.modules", - {"gnocchiclient": mock_gnocchi, - "keystoneauth1": mock_keystoneauth1}): - mock_keystoneauth1.discover.Discover.return_value = ( - mock.Mock(version_data=mock.Mock(return_value=[ - {"version": (1, 0)}])) - ) - client = self.clients.gnocchi() - - self.assertEqual(fake_gnocchi, client) - kw = {"version": "1", - "session": mock_keystoneauth1.session.Session(), - "adapter_options": {"service_type": "metric"}} - mock_gnocchi.client.Client.assert_called_once_with(**kw) - self.assertEqual(fake_gnocchi, self.clients.cache["gnocchi"]) - - def test_monasca(self): - fake_monasca = fakes.FakeMonascaClient() - mock_monasca = mock.MagicMock() - mock_monasca.client.Client.return_value = fake_monasca - self.assertNotIn("monasca", self.clients.cache) - with mock.patch.dict("sys.modules", - {"monascaclient": mock_monasca}): - client = self.clients.monasca() - self.assertEqual(fake_monasca, client) - self.service_catalog.url_for.assert_called_once_with( - service_type="monitoring", - region_name=self.credential.region_name) - os_endpoint = self.service_catalog.url_for.return_value - kw = {"token": self.auth_ref.auth_token, - "timeout": cfg.CONF.openstack_client_http_timeout, - "insecure": False, "cacert": None, - "username": self.credential.username, - "password": self.credential.password, - "tenant_name": self.credential.tenant_name, - "auth_url": self.credential.auth_url - } - mock_monasca.client.Client.assert_called_once_with("2_0", - os_endpoint, - **kw) - self.assertEqual(mock_monasca.client.Client.return_value, - self.clients.cache["monasca"]) - - 
@mock.patch("%s.Ironic._get_endpoint" % PATH) - def test_ironic(self, mock_ironic__get_endpoint): - fake_ironic = fakes.FakeIronicClient() - mock_ironic = mock.MagicMock() - mock_ironic.client.get_client = mock.MagicMock( - return_value=fake_ironic) - mock_ironic__get_endpoint.return_value = "http://fake.to:2/fake" - mock_keystoneauth1 = mock.MagicMock() - self.assertNotIn("ironic", self.clients.cache) - with mock.patch.dict("sys.modules", - {"ironicclient": mock_ironic, - "keystoneauth1": mock_keystoneauth1}): - client = self.clients.ironic() - self.assertEqual(fake_ironic, client) - kw = { - "session": mock_keystoneauth1.session.Session(), - "endpoint": mock_ironic__get_endpoint.return_value} - mock_ironic.client.get_client.assert_called_once_with("1", **kw) - self.assertEqual(fake_ironic, self.clients.cache["ironic"]) - - @mock.patch("%s.Sahara._get_endpoint" % PATH) - def test_sahara(self, mock_sahara__get_endpoint): - fake_sahara = fakes.FakeSaharaClient() - mock_sahara = mock.MagicMock() - mock_sahara.client.Client = mock.MagicMock(return_value=fake_sahara) - mock_sahara__get_endpoint.return_value = "http://fake.to:2/fake" - mock_keystoneauth1 = mock.MagicMock() - self.assertNotIn("sahara", self.clients.cache) - with mock.patch.dict("sys.modules", - {"saharaclient": mock_sahara, - "keystoneauth1": mock_keystoneauth1}): - client = self.clients.sahara() - self.assertEqual(fake_sahara, client) - kw = { - "session": mock_keystoneauth1.session.Session(), - "sahara_url": mock_sahara__get_endpoint.return_value} - mock_sahara.client.Client.assert_called_once_with(1.1, **kw) - self.assertEqual(fake_sahara, self.clients.cache["sahara"]) - - def test_zaqar(self): - fake_zaqar = fakes.FakeZaqarClient() - mock_zaqar = mock.MagicMock() - mock_zaqar.client.Client = mock.MagicMock(return_value=fake_zaqar) - self.assertNotIn("zaqar", self.clients.cache) - mock_keystoneauth1 = mock.MagicMock() - with mock.patch.dict("sys.modules", {"zaqarclient.queues": - mock_zaqar, - "keystoneauth1": - mock_keystoneauth1}): - client = self.clients.zaqar() - self.assertEqual(fake_zaqar, client) - self.service_catalog.url_for.assert_called_once_with( - service_type="messaging", - region_name=self.credential.region_name) - fake_zaqar_url = self.service_catalog.url_for.return_value - mock_zaqar.client.Client.assert_called_once_with( - url=fake_zaqar_url, version=1.1, - session=mock_keystoneauth1.session.Session()) - self.assertEqual(fake_zaqar, self.clients.cache["zaqar"], - mock_keystoneauth1.session.Session()) - - @mock.patch("%s.Trove._get_endpoint" % PATH) - def test_trove(self, mock_trove__get_endpoint): - fake_trove = fakes.FakeTroveClient() - mock_trove = mock.MagicMock() - mock_trove.client.Client = mock.MagicMock(return_value=fake_trove) - mock_trove__get_endpoint.return_value = "http://fake.to:2/fake" - mock_keystoneauth1 = mock.MagicMock() - self.assertNotIn("trove", self.clients.cache) - with mock.patch.dict("sys.modules", - {"troveclient": mock_trove, - "keystoneauth1": mock_keystoneauth1}): - client = self.clients.trove() - self.assertEqual(fake_trove, client) - kw = { - "session": mock_keystoneauth1.session.Session(), - "endpoint": mock_trove__get_endpoint.return_value} - mock_trove.client.Client.assert_called_once_with("1.0", **kw) - self.assertEqual(fake_trove, self.clients.cache["trove"]) - - def test_mistral(self): - fake_mistral = fakes.FakeMistralClient() - mock_mistral = mock.Mock() - mock_mistral.client.client.return_value = fake_mistral - - self.assertNotIn("mistral", self.clients.cache) - with 
mock.patch.dict( - "sys.modules", {"mistralclient": mock_mistral, - "mistralclient.api": mock_mistral}): - client = self.clients.mistral() - self.assertEqual(fake_mistral, client) - self.service_catalog.url_for.assert_called_once_with( - service_type="workflowv2", - region_name=self.credential.region_name - ) - fake_mistral_url = self.service_catalog.url_for.return_value - mock_mistral.client.client.assert_called_once_with( - mistral_url=fake_mistral_url, - service_type="workflowv2", - auth_token=self.auth_ref.auth_token - ) - self.assertEqual(fake_mistral, self.clients.cache["mistral"]) - - def test_swift(self): - fake_swift = fakes.FakeSwiftClient() - mock_swift = mock.MagicMock() - mock_swift.client.Connection = mock.MagicMock(return_value=fake_swift) - self.assertNotIn("swift", self.clients.cache) - with mock.patch.dict("sys.modules", {"swiftclient": mock_swift}): - client = self.clients.swift() - self.assertEqual(fake_swift, client) - self.service_catalog.url_for.assert_called_once_with( - service_type="object-store", - region_name=self.credential.region_name) - kw = {"retries": 1, - "preauthurl": self.service_catalog.url_for.return_value, - "preauthtoken": self.auth_ref.auth_token, - "insecure": False, - "cacert": None, - "user": self.credential.username, - "tenant_name": self.credential.tenant_name, - } - mock_swift.client.Connection.assert_called_once_with(**kw) - self.assertEqual(fake_swift, self.clients.cache["swift"]) - - @mock.patch("%s.EC2._get_endpoint" % PATH) - def test_ec2(self, mock_ec2__get_endpoint): - mock_boto = mock.Mock() - self.fake_keystone.ec2 = mock.Mock() - self.fake_keystone.ec2.create.return_value = mock.Mock( - access="fake_access", secret="fake_secret") - mock_ec2__get_endpoint.return_value = "http://fake.to:1/fake" - fake_ec2 = fakes.FakeEC2Client() - mock_boto.connect_ec2_endpoint.return_value = fake_ec2 - - self.assertNotIn("ec2", self.clients.cache) - with mock.patch.dict("sys.modules", {"boto": mock_boto}): - client = self.clients.ec2() - - self.assertEqual(fake_ec2, client) - kw = { - "url": "http://fake.to:1/fake", - "aws_access_key_id": "fake_access", - "aws_secret_access_key": "fake_secret", - "is_secure": self.credential.insecure, - } - mock_boto.connect_ec2_endpoint.assert_called_once_with(**kw) - self.assertEqual(fake_ec2, self.clients.cache["ec2"]) - - @mock.patch("%s.Keystone.service_catalog" % PATH) - def test_services(self, mock_keystone_service_catalog): - available_services = {consts.ServiceType.IDENTITY: {}, - consts.ServiceType.COMPUTE: {}, - "some_service": {}} - mock_get_endpoints = mock_keystone_service_catalog.get_endpoints - mock_get_endpoints.return_value = available_services - clients = osclients.Clients(self.credential) - - self.assertEqual( - {consts.ServiceType.IDENTITY: consts.Service.KEYSTONE, - consts.ServiceType.COMPUTE: consts.Service.NOVA, - "some_service": "__unknown__"}, - clients.services()) - - def test_murano(self): - fake_murano = fakes.FakeMuranoClient() - mock_murano = mock.Mock() - mock_murano.client.Client.return_value = fake_murano - self.assertNotIn("murano", self.clients.cache) - with mock.patch.dict("sys.modules", {"muranoclient": mock_murano}): - client = self.clients.murano() - self.assertEqual(fake_murano, client) - self.service_catalog.url_for.assert_called_once_with( - service_type="application-catalog", - region_name=self.credential.region_name - ) - kw = {"endpoint": self.service_catalog.url_for.return_value, - "token": self.auth_ref.auth_token} - mock_murano.client.Client.assert_called_once_with("1", 
**kw) - self.assertEqual(fake_murano, self.clients.cache["murano"]) - - @mock.patch("%s.Keystone.get_session" % PATH) - @ddt.data( - {}, - {"version": "2"}, - {"version": "1"}, - {"version": None} - ) - @ddt.unpack - def test_designate(self, mock_keystone_get_session, version=None): - fake_designate = fakes.FakeDesignateClient() - mock_designate = mock.Mock() - mock_designate.client.Client.return_value = fake_designate - - mock_keystone_get_session.return_value = ("fake_session", - "fake_auth_plugin") - - self.assertNotIn("designate", self.clients.cache) - with mock.patch.dict("sys.modules", - {"designateclient": mock_designate}): - if version is not None: - client = self.clients.designate(version=version) - else: - client = self.clients.designate() - self.assertEqual(fake_designate, client) - self.service_catalog.url_for.assert_called_once_with( - service_type="dns", - region_name=self.credential.region_name - ) - - default = version or "1" - - # Check that we append /v - url = self.service_catalog.url_for.return_value - url.__iadd__.assert_called_once_with("/v%s" % default) - - mock_keystone_get_session.assert_called_once_with() - - if version == "2": - mock_designate.client.Client.assert_called_once_with( - version, - endpoint_override=url.__iadd__.return_value, - session="fake_session") - elif version == "1": - mock_designate.client.Client.assert_called_once_with( - version, - endpoint=url.__iadd__.return_value, - session="fake_session") - - key = "designate" - if version is not None: - key += "%s" % {"version": version} - self.assertEqual(fake_designate, self.clients.cache[key]) - - def test_senlin(self): - mock_senlin = mock.MagicMock() - self.assertNotIn("senlin", self.clients.cache) - with mock.patch.dict("sys.modules", {"senlinclient": mock_senlin}): - client = self.clients.senlin() - self.assertEqual(mock_senlin.client.Client.return_value, client) - mock_senlin.client.Client.assert_called_once_with( - "1", - username=self.credential.username, - password=self.credential.password, - project_name=self.credential.tenant_name, - cert=self.credential.cacert, - auth_url=self.credential.auth_url) - self.assertEqual( - mock_senlin.client.Client.return_value, - self.clients.cache["senlin"]) - - @mock.patch("%s.Magnum._get_endpoint" % PATH) - def test_magnum(self, mock_magnum__get_endpoint): - fake_magnum = fakes.FakeMagnumClient() - mock_magnum = mock.MagicMock() - mock_magnum.client.Client.return_value = fake_magnum - - mock_magnum__get_endpoint.return_value = "http://fake.to:2/fake" - mock_keystoneauth1 = mock.MagicMock() - - self.assertNotIn("magnum", self.clients.cache) - with mock.patch.dict("sys.modules", - {"magnumclient": mock_magnum, - "keystoneauth1": mock_keystoneauth1}): - client = self.clients.magnum() - - self.assertEqual(fake_magnum, client) - kw = { - "interface": self.credential.endpoint_type, - "session": mock_keystoneauth1.session.Session(), - "magnum_url": mock_magnum__get_endpoint.return_value} - - mock_magnum.client.Client.assert_called_once_with(**kw) - self.assertEqual(fake_magnum, self.clients.cache["magnum"]) - - @mock.patch("%s.Watcher._get_endpoint" % PATH) - def test_watcher(self, mock_watcher__get_endpoint): - fake_watcher = fakes.FakeWatcherClient() - mock_watcher = mock.MagicMock() - mock_watcher__get_endpoint.return_value = "http://fake.to:2/fake" - mock_keystoneauth1 = mock.MagicMock() - mock_watcher.client.Client.return_value = fake_watcher - self.assertNotIn("watcher", self.clients.cache) - with mock.patch.dict("sys.modules", - {"watcherclient": 
mock_watcher, - "keystoneauth1": mock_keystoneauth1}): - client = self.clients.watcher() - - self.assertEqual(fake_watcher, client) - kw = { - "session": mock_keystoneauth1.session.Session(), - "endpoint": mock_watcher__get_endpoint.return_value} - - mock_watcher.client.Client.assert_called_once_with("1", **kw) - self.assertEqual(fake_watcher, self.clients.cache["watcher"]) diff --git a/tests/unit/plugins/openstack/test_scenario.py b/tests/unit/plugins/openstack/test_scenario.py deleted file mode 100644 index 2527709ac4..0000000000 --- a/tests/unit/plugins/openstack/test_scenario.py +++ /dev/null @@ -1,169 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import fixtures -import mock - -from rally.plugins.openstack.credential import OpenStackCredential -from rally.plugins.openstack import scenario as base_scenario -from tests.unit import test - - -CREDENTIAL_WITHOUT_HMAC = OpenStackCredential( - "auth_url", - "username", - "password") - -CREDENTIAL_WITH_HMAC = OpenStackCredential( - "auth_url", - "username", - "password", - profiler_hmac_key="test_profiler_hmac_key") - - -@ddt.ddt -class OpenStackScenarioTestCase(test.TestCase): - def setUp(self): - super(OpenStackScenarioTestCase, self).setUp() - self.osclients = fixtures.MockPatch( - "rally.plugins.openstack.osclients.Clients") - self.useFixture(self.osclients) - self.context = test.get_test_context() - self.context.update({"foo": "bar"}) - - def test_init(self): - scenario = base_scenario.OpenStackScenario(self.context) - self.assertEqual(self.context, scenario.context) - - def test_init_admin_context(self): - self.context["admin"] = {"credential": mock.Mock()} - scenario = base_scenario.OpenStackScenario(self.context) - self.assertEqual(self.context, scenario.context) - self.osclients.mock.assert_called_once_with( - self.context["admin"]["credential"], {}) - - scenario = base_scenario.OpenStackScenario( - self.context, admin_clients="foobar") - - def test_init_admin_clients(self): - scenario = base_scenario.OpenStackScenario( - self.context, admin_clients="foobar") - self.assertEqual(self.context, scenario.context) - - self.assertEqual("foobar", scenario._admin_clients) - - def test_init_user_context(self): - user = {"credential": mock.Mock(), "tenant_id": "foo"} - self.context["users"] = [user] - self.context["tenants"] = {"foo": {"name": "bar"}} - self.context["user_choice_method"] = "random" - - scenario = base_scenario.OpenStackScenario(self.context) - - self.assertEqual(user, scenario.context["user"]) - self.assertEqual(self.context["tenants"]["foo"], - scenario.context["tenant"]) - - self.osclients.mock.assert_called_once_with(user["credential"], {}) - - def test_init_clients(self): - scenario = base_scenario.OpenStackScenario(self.context, - admin_clients="spam", - clients="ham") - self.assertEqual("spam", scenario._admin_clients) - self.assertEqual("ham", scenario._clients) - - def test_init_user_clients(self): - scenario = base_scenario.OpenStackScenario( - self.context, 
clients="foobar") - self.assertEqual(self.context, scenario.context) - - self.assertEqual("foobar", scenario._clients) - - @ddt.data(([], 0), - ([("admin", CREDENTIAL_WITHOUT_HMAC)], 0), - ([("user", CREDENTIAL_WITHOUT_HMAC)], 0), - ([("admin", CREDENTIAL_WITH_HMAC)], 1), - ([("user", CREDENTIAL_WITH_HMAC)], 1), - ([("admin", CREDENTIAL_WITH_HMAC), - ("user", CREDENTIAL_WITH_HMAC)], 1), - ([("admin", CREDENTIAL_WITHOUT_HMAC), - ("user", CREDENTIAL_WITH_HMAC)], 1), - ([("admin", CREDENTIAL_WITH_HMAC), - ("user", CREDENTIAL_WITHOUT_HMAC)], 1), - ([("admin", CREDENTIAL_WITHOUT_HMAC), - ("user", CREDENTIAL_WITHOUT_HMAC)], 0)) - @ddt.unpack - @mock.patch("rally.plugins.openstack.scenario.profiler.init") - @mock.patch("rally.plugins.openstack.scenario.profiler.get") - def test_profiler_init(self, users_credentials, - expected_call_count, - mock_profiler_get, - mock_profiler_init): - for user, credential in users_credentials: - self.context.update({user: {"credential": credential}}) - base_scenario.OpenStackScenario(self.context) - self.assertEqual(expected_call_count, - mock_profiler_init.call_count) - self.assertEqual([mock.call()] * expected_call_count, - mock_profiler_get.call_args_list) - - def test__choose_user_random(self): - users = [{"credential": mock.Mock(), "tenant_id": "foo"} - for _ in range(5)] - self.context["users"] = users - self.context["tenants"] = {"foo": {"name": "bar"}, - "baz": {"name": "spam"}} - self.context["user_choice_method"] = "random" - - scenario = base_scenario.OpenStackScenario() - scenario._choose_user(self.context) - self.assertIn("user", self.context) - self.assertIn(self.context["user"], self.context["users"]) - self.assertIn("tenant", self.context) - tenant_id = self.context["user"]["tenant_id"] - self.assertEqual(self.context["tenants"][tenant_id], - self.context["tenant"]) - - @ddt.data((1, "0", "bar"), - (2, "0", "foo"), - (3, "1", "bar"), - (4, "1", "foo"), - (5, "0", "bar"), - (6, "0", "foo"), - (7, "1", "bar"), - (8, "1", "foo")) - @ddt.unpack - def test__choose_user_round_robin(self, iteration, - expected_user_id, expected_tenant_id): - self.context["iteration"] = iteration - self.context["user_choice_method"] = "round_robin" - self.context["users"] = [] - self.context["tenants"] = {} - for tid in ("foo", "bar"): - users = [{"id": str(i), "tenant_id": tid} for i in range(2)] - self.context["users"] += users - self.context["tenants"][tid] = {"name": tid, "users": users} - - scenario = base_scenario.OpenStackScenario() - scenario._choose_user(self.context) - self.assertIn("user", self.context) - self.assertIn(self.context["user"], self.context["users"]) - self.assertEqual(expected_user_id, self.context["user"]["id"]) - self.assertIn("tenant", self.context) - tenant_id = self.context["user"]["tenant_id"] - self.assertEqual(self.context["tenants"][tenant_id], - self.context["tenant"]) - self.assertEqual(expected_tenant_id, tenant_id) diff --git a/tests/unit/plugins/openstack/test_service.py b/tests/unit/plugins/openstack/test_service.py deleted file mode 100644 index 24ace33782..0000000000 --- a/tests/unit/plugins/openstack/test_service.py +++ /dev/null @@ -1,42 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.plugins.openstack import service -from rally.task import service as base_service -from tests.unit import test - - -class DiscoverTestCase(test.TestCase): - def test_discover_network_impl_based_on_service(self): - - class SomeService(base_service.UnifiedService): - pass - - @service.service("neutron", "network", version="2") - class NeutronV2Service(service.Service): - pass - - @service.compat_layer(NeutronV2Service) - class UnifiedNeutronV2Service(SomeService): - pass - - clients = mock.MagicMock() - clients.neutron.choose_version.return_value = "2" - - clients.services.return_value = {} - - clients.services.return_value = {"network": "neutron"} - self.assertIsInstance(SomeService(clients)._impl, - UnifiedNeutronV2Service) diff --git a/tests/unit/plugins/openstack/test_types.py b/tests/unit/plugins/openstack/test_types.py deleted file mode 100644 index 1be0c38e31..0000000000 --- a/tests/unit/plugins/openstack/test_types.py +++ /dev/null @@ -1,408 +0,0 @@ -# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from rally import exceptions -from rally.plugins.openstack import types -from tests.unit import fakes -from tests.unit import test - - -class FlavorTestCase(test.TestCase): - - def setUp(self): - super(FlavorTestCase, self).setUp() - self.clients = fakes.FakeClients() - self.clients.nova().flavors._cache(fakes.FakeResource(name="m1.tiny", - id="1")) - self.clients.nova().flavors._cache(fakes.FakeResource(name="m1.nano", - id="42")) - self.clients.nova().flavors._cache(fakes.FakeResource(name="m1.large", - id="44")) - self.clients.nova().flavors._cache(fakes.FakeResource(name="m1.large", - id="45")) - self.type_cls = types.Flavor( - context={"admin": {"credential": mock.Mock()}}) - self.type_cls._clients = self.clients - - def test_preprocess_by_id(self): - resource_spec = {"id": "42"} - flavor_id = self.type_cls.pre_process( - resource_spec=resource_spec, config={}) - self.assertEqual("42", flavor_id) - - def test_preprocess_by_name(self): - resource_spec = {"name": "m1.nano"} - flavor_id = self.type_cls.pre_process( - resource_spec=resource_spec, config={}) - self.assertEqual("42", flavor_id) - - def test_preprocess_by_name_no_match(self): - resource_spec = {"name": "m1.medium"} - self.assertRaises(exceptions.InvalidScenarioArgument, - self.type_cls.pre_process, - resource_spec=resource_spec, config={}) - - def test_preprocess_by_name_multiple_match(self): - resource_spec = {"name": "m1.large"} - self.assertRaises(exceptions.InvalidScenarioArgument, - self.type_cls.pre_process, - resource_spec=resource_spec, config={}) - - def test_preprocess_by_regex(self): - resource_spec = {"regex": "m(1|2)\.nano"} - flavor_id = self.type_cls.pre_process( - resource_spec=resource_spec, config={}) - self.assertEqual("42", flavor_id) - - def test_preprocess_by_regex_multiple_match(self): - resource_spec = {"regex": "^m1"} - self.assertRaises(exceptions.InvalidScenarioArgument, - self.type_cls.pre_process, - resource_spec=resource_spec, config={}) - - def test_preprocess_by_regex_no_match(self): - resource_spec = {} - self.assertRaises(exceptions.InvalidScenarioArgument, - self.type_cls.pre_process, - resource_spec=resource_spec, config={}) - - -class EC2FlavorTestCase(test.TestCase): - - def setUp(self): - super(EC2FlavorTestCase, self).setUp() - self.clients = fakes.FakeClients() - self.clients.nova().flavors._cache(fakes.FakeResource(name="m1.tiny", - id="1")) - self.clients.nova().flavors._cache(fakes.FakeResource(name="m1.nano", - id="2")) - self.clients.nova().flavors._cache(fakes.FakeResource(name="m1.large", - id="3")) - self.clients.nova().flavors._cache(fakes.FakeResource(name="m1.xlarge", - id="3")) - self.type_cls = types.EC2Flavor( - context={"admin": {"credential": mock.Mock()}}) - self.type_cls._clients = self.clients - - def test_preprocess_by_name(self): - resource_spec = {"name": "m1.nano"} - flavor_name = self.type_cls.pre_process( - resource_spec=resource_spec, config={}) - self.assertEqual("m1.nano", flavor_name) - - def test_preprocess_by_id(self): - resource_spec = {"id": "2"} - flavor_name = self.type_cls.pre_process( - resource_spec=resource_spec, config={}) - self.assertEqual("m1.nano", flavor_name) - - def test_preprocess_by_id_no_match(self): - resource_spec = {"id": "4"} - self.assertRaises(exceptions.InvalidScenarioArgument, - self.type_cls.pre_process, - resource_spec=resource_spec, config={}) - - -class GlanceImageTestCase(test.TestCase): - - def setUp(self): - super(GlanceImageTestCase, self).setUp() - self.clients = fakes.FakeClients() - image1 = 
fakes.FakeResource(name="cirros-0.3.4-uec", id="100") - self.clients.glance().images._cache(image1) - image2 = fakes.FakeResource(name="cirros-0.3.4-uec-ramdisk", id="101") - self.clients.glance().images._cache(image2) - image3 = fakes.FakeResource(name="cirros-0.3.4-uec-ramdisk-copy", - id="102") - self.clients.glance().images._cache(image3) - image4 = fakes.FakeResource(name="cirros-0.3.4-uec-ramdisk-copy", - id="103") - self.clients.glance().images._cache(image4) - self.type_cls = types.GlanceImage( - context={"admin": {"credential": mock.Mock()}}) - self.type_cls._clients = self.clients - - def test_preprocess_by_id(self): - resource_spec = {"id": "100"} - image_id = self.type_cls.pre_process( - resource_spec=resource_spec, config={}) - self.assertEqual("100", image_id) - - def test_preprocess_by_name(self): - resource_spec = {"name": "^cirros-0.3.4-uec$"} - image_id = self.type_cls.pre_process( - resource_spec=resource_spec, config={}) - self.assertEqual("100", image_id) - - def test_preprocess_by_name_no_match(self): - resource_spec = {"name": "cirros-0.3.4-uec-boot"} - self.assertRaises(exceptions.InvalidScenarioArgument, - self.type_cls.pre_process, - resource_spec=resource_spec, config={}) - - def test_preprocess_by_name_match_multiple(self): - resource_spec = {"name": "cirros-0.3.4-uec-ramdisk-copy"} - self.assertRaises(exceptions.InvalidScenarioArgument, - self.type_cls.pre_process, - resource_spec=resource_spec, config={}) - - def test_preprocess_by_regex(self): - resource_spec = {"regex": "-uec$"} - image_id = self.type_cls.pre_process( - resource_spec=resource_spec, config={}) - self.assertEqual("100", image_id) - - def test_preprocess_by_regex_match_multiple(self): - resource_spec = {"regex": "^cirros"} - self.assertRaises(exceptions.InvalidScenarioArgument, - self.type_cls.pre_process, - resource_spec=resource_spec, config={}) - - def test_preprocess_by_regex_no_match(self): - resource_spec = {"regex": "-boot$"} - self.assertRaises(exceptions.InvalidScenarioArgument, - self.type_cls.pre_process, - resource_spec=resource_spec, config={}) - - -class GlanceImageArgsTestCase(test.TestCase): - - def test_preprocess(self): - self.assertEqual( - {}, - types.GlanceImageArguments({}).pre_process( - resource_spec={}, config={})) - self.assertEqual( - {"visibility": "public"}, - types.GlanceImageArguments({}).pre_process( - config={}, resource_spec={"visibility": "public"})) - self.assertEqual( - {"visibility": "public"}, - types.GlanceImageArguments({}).pre_process( - config={}, resource_spec={"visibility": "public", - "is_public": False})) - self.assertEqual( - {"visibility": "private"}, - types.GlanceImageArguments({}).pre_process( - config={}, resource_spec={"is_public": False})) - - -class EC2ImageTestCase(test.TestCase): - - def setUp(self): - super(EC2ImageTestCase, self).setUp() - self.clients = fakes.FakeClients() - image1 = fakes.FakeResource(name="cirros-0.3.4-uec", id="100") - self.clients.glance().images._cache(image1) - image2 = fakes.FakeResource(name="cirros-0.3.4-uec-ramdisk", id="102") - self.clients.glance().images._cache(image2) - image3 = fakes.FakeResource(name="cirros-0.3.4-uec-ramdisk-copy", - id="102") - self.clients.glance().images._cache(image3) - image4 = fakes.FakeResource(name="cirros-0.3.4-uec-ramdisk-copy", - id="103") - self.clients.glance().images._cache(image4) - - ec2_image1 = fakes.FakeResource(name="cirros-0.3.4-uec", id="200") - ec2_image2 = fakes.FakeResource(name="cirros-0.3.4-uec-ramdisk", - id="201") - ec2_image3 = 
fakes.FakeResource(name="cirros-0.3.4-uec-ramdisk-copy", - id="202") - ec2_image4 = fakes.FakeResource(name="cirros-0.3.4-uec-ramdisk-copy", - id="203") - - self.clients.ec2().get_all_images = mock.Mock( - return_value=[ec2_image1, ec2_image2, ec2_image3, ec2_image4]) - - self.type_cls = types.EC2Image( - context={"admin": {"credential": mock.Mock()}}) - self.type_cls._clients = self.clients - - def test_preprocess_by_name(self): - resource_spec = {"name": "^cirros-0.3.4-uec$"} - ec2_image_id = self.type_cls.pre_process(resource_spec=resource_spec, - config={}) - self.assertEqual("200", ec2_image_id) - - def test_preprocess_by_id(self): - resource_spec = {"id": "100"} - ec2_image_id = self.type_cls.pre_process(resource_spec=resource_spec, - config={}) - self.assertEqual("200", ec2_image_id) - - def test_preprocess_by_id_no_match(self): - resource_spec = {"id": "101"} - self.assertRaises(exceptions.InvalidScenarioArgument, - self.type_cls.pre_process, - resource_spec=resource_spec, config={}) - - def test_preprocess_by_name_no_match(self): - resource_spec = {"name": "cirros-0.3.4-uec-boot"} - self.assertRaises(exceptions.InvalidScenarioArgument, - self.type_cls.pre_process, - resource_spec=resource_spec, config={}) - - def test_preprocess_by_name_match_multiple(self): - resource_spec = {"name": "cirros-0.3.4-uec-ramdisk-copy"} - self.assertRaises(exceptions.InvalidScenarioArgument, - self.type_cls.pre_process, - resource_spec=resource_spec, config={}) - - def test_preprocess_by_regex(self): - resource_spec = {"regex": "-uec$"} - ec2_image_id = self.type_cls.pre_process(resource_spec=resource_spec, - config={}) - self.assertEqual("200", ec2_image_id) - - def test_preprocess_by_regex_match_multiple(self): - resource_spec = {"regex": "^cirros"} - self.assertRaises(exceptions.InvalidScenarioArgument, - self.type_cls.pre_process, - resource_spec=resource_spec, config={}) - - def test_preprocess_by_regex_no_match(self): - resource_spec = {"regex": "-boot$"} - self.assertRaises(exceptions.InvalidScenarioArgument, - self.type_cls.pre_process, - resource_spec=resource_spec, config={}) - - -class VolumeTypeTestCase(test.TestCase): - - def setUp(self): - super(VolumeTypeTestCase, self).setUp() - cinder = mock.patch("rally.plugins.openstack.types.block.BlockStorage") - self.service = cinder.start().return_value - self.addCleanup(cinder.stop) - - volume_type1 = fakes.FakeResource(name="lvmdriver-1", id=100) - - self.type_cls = types.VolumeType( - context={"admin": {"credential": mock.Mock()}}) - self.service.list_types.return_value = [volume_type1] - - def test_preprocess_by_id(self): - resource_spec = {"id": 100} - volumetype_id = self.type_cls.pre_process(resource_spec=resource_spec, - config={}) - self.assertEqual(100, volumetype_id) - - def test_preprocess_by_name(self): - resource_spec = {"name": "lvmdriver-1"} - volumetype_id = self.type_cls.pre_process(resource_spec=resource_spec, - config={}) - self.assertEqual(100, volumetype_id) - - def test_preprocess_by_name_no_match(self): - resource_spec = {"name": "nomatch-1"} - self.assertRaises(exceptions.InvalidScenarioArgument, - self.type_cls.pre_process, - resource_spec=resource_spec, config={}) - - def test_preprocess_by_regex(self): - resource_spec = {"regex": "^lvm.*-1"} - volumetype_id = self.type_cls.pre_process(resource_spec=resource_spec, - config={}) - self.assertEqual(100, volumetype_id) - - def test_preprocess_by_regex_no_match(self): - resource_spec = {"regex": "dd"} - self.assertRaises(exceptions.InvalidScenarioArgument, - 
self.type_cls.pre_process, - resource_spec=resource_spec, config={}) - - -class NeutronNetworkTestCase(test.TestCase): - - def setUp(self): - super(NeutronNetworkTestCase, self).setUp() - self.clients = fakes.FakeClients() - net1_data = {"network": { - "name": "net1" - }} - network1 = self.clients.neutron().create_network(net1_data) - self.net1_id = network1["network"]["id"] - self.type_cls = types.NeutronNetwork( - context={"admin": {"credential": mock.Mock()}}) - self.type_cls._clients = self.clients - - def test_preprocess_by_id(self): - resource_spec = {"id": self.net1_id} - network_id = self.type_cls.pre_process(resource_spec=resource_spec, - config={}) - self.assertEqual(network_id, self.net1_id) - - def test_preprocess_by_name(self): - resource_spec = {"name": "net1"} - network_id = self.type_cls.pre_process(resource_spec=resource_spec, - config={}) - self.assertEqual(network_id, self.net1_id) - - def test_preprocess_by_name_no_match(self): - resource_spec = {"name": "nomatch-1"} - self.assertRaises(exceptions.InvalidScenarioArgument, - self.type_cls.pre_process, - resource_spec=resource_spec, config={}) - - -class WatcherStrategyTestCase(test.TestCase): - - def setUp(self): - super(WatcherStrategyTestCase, self).setUp() - self.clients = fakes.FakeClients() - self.strategy = self.clients.watcher().strategy._cache( - fakes.FakeResource(name="dummy", id="1")) - - self.type_cls = types.WatcherStrategy( - context={"admin": {"credential": mock.Mock()}}) - self.type_cls._clients = self.clients - - def test_preprocess_by_name(self): - resource_spec = {"name": "dummy"} - strategy_id = self.type_cls.pre_process(resource_spec=resource_spec, - config={}) - self.assertEqual(self.strategy.uuid, strategy_id) - - def test_preprocess_by_name_no_match(self): - resource_spec = {"name": "dummy-1"} - self.assertRaises(exceptions.RallyException, - self.type_cls.pre_process, - resource_spec=resource_spec, config={}) - - -class WatcherGoalTestCase(test.TestCase): - - def setUp(self): - super(WatcherGoalTestCase, self).setUp() - self.clients = fakes.FakeClients() - self.goal = self.clients.watcher().goal._cache( - fakes.FakeResource(name="dummy", id="1")) - self.type_cls = types.WatcherGoal( - context={"admin": {"credential": mock.Mock()}}) - self.type_cls._clients = self.clients - - def test_preprocess_by_name(self): - resource_spec = {"name": "dummy"} - goal_id = self.type_cls.pre_process(resource_spec=resource_spec, - config={}) - self.assertEqual(self.goal.uuid, goal_id) - - def test_preprocess_by_name_no_match(self): - resource_spec = {"name": "dummy-1"} - self.assertRaises(exceptions.RallyException, - self.type_cls.pre_process, - resource_spec=resource_spec, config={}) diff --git a/tests/unit/plugins/openstack/test_validators.py b/tests/unit/plugins/openstack/test_validators.py deleted file mode 100644 index 52e8400960..0000000000 --- a/tests/unit/plugins/openstack/test_validators.py +++ /dev/null @@ -1,982 +0,0 @@ -# Copyright 2017: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy -import ddt -import mock - -from glanceclient import exc as glance_exc -from novaclient import exceptions as nova_exc - -from rally import consts -from rally import exceptions -from rally.plugins.openstack import validators -from tests.unit import test - - -PATH = "rally.plugins.openstack.validators" - - -context = { - "admin": mock.MagicMock(), - "users": [mock.MagicMock()], -} - -config = dict(args={"image": {"id": "fake_id", - "min_ram": 10, - "size": 1024 ** 3, - "min_disk": 10.0 * (1024 ** 3), - "image_name": "foo_image"}, - "flavor": {"id": "fake_flavor_id", - "name": "test"}, - "foo_image": {"id": "fake_image_id"} - }, - context={"images": {"image_name": "foo_image"}, - "api_versions@openstack": mock.MagicMock()} - ) - - -@mock.patch("rally.plugins.openstack.context.keystone.roles.RoleGenerator") -def test_with_roles_ctx(mock_role_generator): - - @validators.with_roles_ctx() - def func(config, context): - pass - - config = {"contexts": {}} - context = {"admin": {"credential": mock.MagicMock()}, - "task": mock.MagicMock()} - func(config, context) - mock_role_generator().setup.assert_not_called() - - config = {"contexts": {"roles": "admin"}} - func(config, context) - mock_role_generator().setup.assert_called_once_with() - - -@ddt.ddt -class ImageExistsValidatorTestCase(test.TestCase): - - def setUp(self): - super(ImageExistsValidatorTestCase, self).setUp() - self.validator = validators.ImageExistsValidator("image", True) - self.config = copy.deepcopy(config) - self.context = copy.deepcopy(context) - - @ddt.unpack - @ddt.data( - {"param_name": "fake_param", "nullable": True, "err_msg": None}, - {"param_name": "fake_param", "nullable": False, - "err_msg": "Parameter fake_param is not specified."}, - {"param_name": "image", "nullable": True, "err_msg": None}, - ) - def test_validator(self, param_name, nullable, err_msg, ex=False): - validator = validators.ImageExistsValidator(param_name, - nullable) - - clients = self.context["users"][0].clients.return_value - - clients.glance().images.get = mock.Mock() - if ex: - clients.glance().images.get.side_effect = ex - - if err_msg: - e = self.assertRaises( - validators.validation.ValidationError, - validator.validate, self.context, self.config, None, None) - self.assertEqual(err_msg, e.message) - else: - result = validator.validate(self.config, self.context, None, - None) - self.assertIsNone(result) - - def test_validator_image_from_context(self): - config = { - "args": {"image": {"regex": r"^foo$"}}, - "contexts": {"images": {"image_name": "foo"}}} - - self.validator.validate(self.context, config, None, None) - - @mock.patch("%s.openstack_types.GlanceImage" % PATH) - def test_validator_image_not_in_context(self, mock_glance_image): - mock_glance_image.return_value.pre_process.return_value = "image_id" - config = { - "args": {"image": "fake_image"}, - "contexts": { - "images": {"fake_image_name": "foo"}}} - - clients = self.context[ - "users"][0]["credential"].clients.return_value - clients.glance().images.get = mock.Mock() - - result = self.validator.validate(self.context, config, None, None) - self.assertIsNone(result) - - mock_glance_image.assert_called_once_with( - context={"admin": { - "credential": self.context["users"][0]["credential"]}}) - mock_glance_image.return_value.pre_process.assert_called_once_with( - config["args"]["image"], config={}) - clients.glance().images.get.assert_called_with("image_id") - - exs = 
[exceptions.InvalidScenarioArgument(), - glance_exc.HTTPNotFound()] - for ex in exs: - clients.glance().images.get.side_effect = ex - - e = self.assertRaises( - validators.validation.ValidationError, - self.validator.validate, self.context, config, None, None) - - self.assertEqual("Image 'fake_image' not found", e.message) - - -@ddt.ddt -class ExternalNetworkExistsValidatorTestCase(test.TestCase): - - def setUp(self): - super(ExternalNetworkExistsValidatorTestCase, self).setUp() - self.validator = validators.ExternalNetworkExistsValidator("net") - self.config = copy.deepcopy(config) - self.context = copy.deepcopy(context) - - @ddt.unpack - @ddt.data( - {"foo_conf": {}}, - {"foo_conf": {"args": {"net": "custom"}}}, - {"foo_conf": {"args": {"net": "non_exist"}}, - "err_msg": "External (floating) network with name non_exist" - " not found by user {}. Available networks:" - " [{}, {}]"}, - {"foo_conf": {"args": {"net": "custom"}}, - "net1_name": {"name": {"net": "public"}}, - "net2_name": {"name": {"net": "custom"}}, - "err_msg": "External (floating) network with name custom" - " not found by user {}. Available networks:" - " [{}, {}]"} - ) - def test_validator(self, foo_conf, net1_name="public", net2_name="custom", - err_msg=""): - - user = self.context["users"][0] - - net1 = {"name": net1_name, "router:external": True} - net2 = {"name": net2_name, "router:external": True} - - user["credential"].clients().neutron().list_networks.return_value = { - "networks": [net1, net2]} - if err_msg: - e = self.assertRaises( - validators.validation.ValidationError, - self.validator.validate, self.context, foo_conf, - None, None) - self.assertEqual( - err_msg.format(user["credential"].username, net1, net2), - e.message) - else: - result = self.validator.validate(self.context, foo_conf, - None, None) - self.assertIsNone(result, "Unexpected result '%s'" % result) - - -@ddt.ddt -class RequiredNeutronExtensionsValidatorTestCase(test.TestCase): - - def setUp(self): - super(RequiredNeutronExtensionsValidatorTestCase, self).setUp() - self.config = copy.deepcopy(config) - self.context = copy.deepcopy(context) - - def test_validator(self): - validator = validators.RequiredNeutronExtensionsValidator( - "existing_extension") - clients = self.context["users"][0]["credential"].clients() - - clients.neutron().list_extensions.return_value = { - "extensions": [{"alias": "existing_extension"}]} - - validator.validate(self.context, {}, None, None) - - def test_validator_failed(self): - err_msg = "Neutron extension absent_extension is not configured" - validator = validators.RequiredNeutronExtensionsValidator( - "absent_extension") - clients = self.context["users"][0]["credential"].clients() - - clients.neutron().list_extensions.return_value = { - "extensions": [{"alias": "existing_extension"}]} - - e = self.assertRaises( - validators.validation.ValidationError, - validator.validate, self.context, {}, None, None) - self.assertEqual(err_msg, e.message) - - -class FlavorExistsValidatorTestCase(test.TestCase): - - def setUp(self): - super(FlavorExistsValidatorTestCase, self).setUp() - self.validator = validators.FlavorExistsValidator( - param_name="foo_flavor") - self.config = copy.deepcopy(config) - self.context = copy.deepcopy(context) - - def test__get_validated_flavor_wrong_value_in_config(self): - e = self.assertRaises( - validators.validation.ValidationError, - self.validator._get_validated_flavor, self.config, - mock.MagicMock(), "foo_flavor") - self.assertEqual("Parameter foo_flavor is not specified.", - e.message) - - 
@mock.patch("%s.openstack_types.Flavor" % PATH) - def test__get_validated_flavor(self, mock_flavor): - mock_flavor.return_value.pre_process.return_value = "flavor_id" - - clients = mock.Mock() - clients.nova().flavors.get.return_value = "flavor" - - result = self.validator._get_validated_flavor(self.config, - clients, - "flavor") - self.assertEqual("flavor", result) - - mock_flavor.assert_called_once_with( - context={"admin": {"credential": clients.credential}} - ) - mock_flavor_obj = mock_flavor.return_value - mock_flavor_obj.pre_process.assert_called_once_with( - self.config["args"]["flavor"], config={}) - clients.nova().flavors.get.assert_called_once_with(flavor="flavor_id") - mock_flavor_obj.pre_process.reset_mock() - - clients.side_effect = exceptions.InvalidScenarioArgument("") - result = self.validator._get_validated_flavor( - self.config, clients, "flavor") - self.assertEqual("flavor", result) - mock_flavor_obj.pre_process.assert_called_once_with( - self.config["args"]["flavor"], config={}) - clients.nova().flavors.get.assert_called_with(flavor="flavor_id") - - @mock.patch("%s.openstack_types.Flavor" % PATH) - def test__get_validated_flavor_not_found(self, mock_flavor): - mock_flavor.return_value.pre_process.return_value = "flavor_id" - - clients = mock.MagicMock() - clients.nova().flavors.get.side_effect = nova_exc.NotFound("") - - e = self.assertRaises( - validators.validation.ValidationError, - self.validator._get_validated_flavor, - self.config, clients, "flavor") - self.assertEqual("Flavor '%s' not found" % - self.config["args"]["flavor"], - e.message) - mock_flavor_obj = mock_flavor.return_value - mock_flavor_obj.pre_process.assert_called_once_with( - self.config["args"]["flavor"], config={}) - - @mock.patch("%s.types.obj_from_name" % PATH) - @mock.patch("%s.flavors_ctx.FlavorConfig" % PATH) - def test__get_flavor_from_context(self, mock_flavor_config, - mock_obj_from_name): - config = { - "contexts": {"images": {"fake_parameter_name": "foo_image"}}} - - e = self.assertRaises( - validators.validation.ValidationError, - self.validator._get_flavor_from_context, - config, "foo_flavor") - self.assertEqual("No flavors context", e.message) - - config = {"contexts": {"images": {"fake_parameter_name": "foo_image"}, - "flavors": [{"flavor1": "fake_flavor1"}]}} - result = self.validator._get_flavor_from_context(config, "foo_flavor") - self.assertEqual("" % result.name, result.id) - - def test_validate(self): - expected_e = validators.validation.ValidationError("fpp") - self.validator._get_validated_flavor = mock.Mock( - side_effect=expected_e) - - config = {} - ctx = mock.MagicMock() - actual_e = self.assertRaises( - validators.validation.ValidationError, - self.validator.validate, ctx, config, None, None) - self.assertEqual(expected_e, actual_e) - self.validator._get_validated_flavor.assert_called_once_with( - config=config, - clients=ctx["users"][0]["credential"].clients(), - param_name=self.validator.param_name) - - -@ddt.ddt -class ImageValidOnFlavorValidatorTestCase(test.TestCase): - - def setUp(self): - super(ImageValidOnFlavorValidatorTestCase, self).setUp() - self.validator = validators.ImageValidOnFlavorValidator("foo_flavor", - "image") - self.config = copy.deepcopy(config) - self.context = copy.deepcopy(context) - - @ddt.data( - {"validate_disk": True, "flavor_disk": True}, - {"validate_disk": False, "flavor_disk": True}, - {"validate_disk": False, "flavor_disk": False} - ) - @ddt.unpack - def test_validate(self, validate_disk, flavor_disk): - validator = 
validators.ImageValidOnFlavorValidator( - flavor_param="foo_flavor", - image_param="image", - fail_on_404_image=False, - validate_disk=validate_disk) - - min_ram = 2048 - disk = 10 - fake_image = {"min_ram": min_ram, - "size": disk * (1024 ** 3), - "min_disk": disk} - fake_flavor = mock.Mock(disk=None, ram=min_ram * 2) - if flavor_disk: - fake_flavor.disk = disk * 2 - - validator._get_validated_flavor = mock.Mock( - return_value=fake_flavor) - - # case 1: no image, but it is ok, since fail_on_404_image is False - validator._get_validated_image = mock.Mock( - side_effect=validators.validation.ValidationError("!!!")) - validator.validate(self.context, {}, None, None) - - # case 2: there is an image - validator._get_validated_image = mock.Mock( - return_value=fake_image) - validator.validate(self.context, {}, None, None) - - # case 3: check caching of the flavor - self.context["users"].append(self.context["users"][0]) - validator._get_validated_image.reset_mock() - validator._get_validated_flavor.reset_mock() - - validator.validate(self.context, {}, None, None) - - self.assertEqual(1, validator._get_validated_flavor.call_count) - self.assertEqual(2, validator._get_validated_image.call_count) - - def test_validate_failed(self): - validator = validators.ImageValidOnFlavorValidator( - flavor_param="foo_flavor", - image_param="image", - fail_on_404_image=True, - validate_disk=True) - - min_ram = 2048 - disk = 10 - fake_flavor = mock.Mock(disk=disk, ram=min_ram) - fake_flavor.id = "flavor_id" - - validator._get_validated_flavor = mock.Mock( - return_value=fake_flavor) - - # case 1: there is no image and fail_on_404_image flag is True - expected_e = validators.validation.ValidationError("!!!") - validator._get_validated_image = mock.Mock( - side_effect=expected_e) - actual_e = self.assertRaises( - validators.validation.ValidationError, - validator.validate, self.context, {}, None, None - ) - self.assertEqual(expected_e, actual_e) - - # case 2: there is no right flavor - expected_e = KeyError("Ooops") - validator._get_validated_flavor.side_effect = expected_e - actual_e = self.assertRaises( - KeyError, - validator.validate, self.context, {}, None, None - ) - self.assertEqual(expected_e, actual_e) - - # case 3: ram of a flavor is less than min_ram of an image - validator._get_validated_flavor = mock.Mock( - return_value=fake_flavor) - - fake_image = {"min_ram": min_ram * 2, "id": "image_id"} - validator._get_validated_image = mock.Mock( - return_value=fake_image) - e = self.assertRaises( - validators.validation.ValidationError, - validator.validate, self.context, {}, None, None - ) - self.assertEqual( - "The memory size for flavor 'flavor_id' is too small for " - "requested image 'image_id'.", e.message) - - # case 4: disk of a flavor is less than size of an image - fake_image = {"min_ram": min_ram / 2.0, - "size": disk * (1024 ** 3) * 3, - "id": "image_id"} - validator._get_validated_image = mock.Mock( - return_value=fake_image) - e = self.assertRaises( - validators.validation.ValidationError, - validator.validate, self.context, {}, None, None - ) - self.assertEqual( - "The disk size for flavor 'flavor_id' is too small for " - "requested image 'image_id'.", e.message) - - # case 5: disk of a flavor is less than size of an image - fake_image = {"min_ram": min_ram, - "size": disk * (1024 ** 3), - "min_disk": disk * 2, - "id": "image_id"} - validator._get_validated_image = mock.Mock( - return_value=fake_image) - e = self.assertRaises( - validators.validation.ValidationError, - validator.validate, 
self.context, {}, None, None - ) - self.assertEqual( - "The minimal disk size for flavor 'flavor_id' is too small for " - "requested image 'image_id'.", e.message) - - # case 6: _get_validated_image raises an unexpected error, - # fail_on_404_image=False should not work in this case - expected_e = KeyError("Foo!") - validator = validators.ImageValidOnFlavorValidator( - flavor_param="foo_flavor", - image_param="image", - fail_on_404_image=False, - validate_disk=True) - validator._get_validated_image = mock.Mock( - side_effect=expected_e) - validator._get_validated_flavor = mock.Mock() - - actual_e = self.assertRaises( - KeyError, - validator.validate, self.context, {}, None, None - ) - - self.assertEqual(expected_e, actual_e) - - @mock.patch("%s.openstack_types.GlanceImage" % PATH) - def test__get_validated_image(self, mock_glance_image): - mock_glance_image.return_value.pre_process.return_value = "image_id" - image = { - "size": 0, - "min_ram": 0, - "min_disk": 0 - } - # Get image name from context - result = self.validator._get_validated_image({ - "args": { - "image": {"regex": r"^foo$"}}, - "contexts": { - "images": {"image_name": "foo"}}}, - mock.Mock(), "image") - self.assertEqual(image, result) - - clients = mock.Mock() - clients.glance().images.get().to_dict.return_value = { - "image": "image_id"} - image["image"] = "image_id" - - result = self.validator._get_validated_image(self.config, - clients, - "image") - self.assertEqual(image, result) - mock_glance_image.assert_called_once_with( - context={"admin": {"credential": clients.credential}}) - mock_glance_image.return_value.pre_process.assert_called_once_with( - config["args"]["image"], config={}) - clients.glance().images.get.assert_called_with("image_id") - - @mock.patch("%s.openstack_types.GlanceImage" % PATH) - def test__get_validated_image_incorrect_param(self, mock_glance_image): - mock_glance_image.return_value.pre_process.return_value = "image_id" - # Wrong 'param_name' - e = self.assertRaises( - validators.validation.ValidationError, - self.validator._get_validated_image, self.config, - mock.Mock(), "fake_param") - self.assertEqual("Parameter fake_param is not specified.", - e.message) - - # 'image_name' is not in 'image_context' - image = {"id": "image_id", "size": 1024, - "min_ram": 256, "min_disk": 512} - - clients = mock.Mock() - clients.glance().images.get().to_dict.return_value = image - config = {"args": {"image": "foo_image", - "context": {"images": { - "fake_parameter_name": "foo_image"} - }} - } - result = self.validator._get_validated_image(config, clients, "image") - self.assertEqual(image, result) - - mock_glance_image.assert_called_once_with( - context={"admin": {"credential": clients.credential}}) - mock_glance_image.return_value.pre_process.assert_called_once_with( - config["args"]["image"], config={}) - clients.glance().images.get.assert_called_with("image_id") - - @mock.patch("%s.openstack_types.GlanceImage" % PATH) - def test__get_validated_image_exceptions(self, mock_glance_image): - mock_glance_image.return_value.pre_process.return_value = "image_id" - clients = mock.Mock() - clients.glance().images.get.side_effect = glance_exc.HTTPNotFound("") - e = self.assertRaises( - validators.validation.ValidationError, - self.validator._get_validated_image, - config, clients, "image") - self.assertEqual("Image '%s' not found" % config["args"]["image"], - e.message) - - mock_glance_image.assert_called_once_with( - context={"admin": {"credential": clients.credential}}) - 
mock_glance_image.return_value.pre_process.assert_called_once_with( - config["args"]["image"], config={}) - clients.glance().images.get.assert_called_with("image_id") - mock_glance_image.return_value.pre_process.reset_mock() - - clients.side_effect = exceptions.InvalidScenarioArgument("") - e = self.assertRaises( - validators.validation.ValidationError, - self.validator._get_validated_image, config, clients, "image") - self.assertEqual("Image '%s' not found" % config["args"]["image"], - e.message) - mock_glance_image.return_value.pre_process.assert_called_once_with( - config["args"]["image"], config={}) - clients.glance().images.get.assert_called_with("image_id") - - -class RequiredClientsValidatorTestCase(test.TestCase): - - def setUp(self): - super(RequiredClientsValidatorTestCase, self).setUp() - self.config = copy.deepcopy(config) - self.context = copy.deepcopy(context) - - def test_validate(self): - validator = validators.RequiredClientsValidator(components=["keystone", - "nova"]) - clients = self.context["users"][0]["credential"].clients.return_value - - result = validator.validate(self.context, self.config, None, None) - self.assertIsNone(result) - - clients.nova.side_effect = ImportError - e = self.assertRaises( - validators.validation.ValidationError, - validator.validate, self.context, self.config, None, None) - self.assertEqual("Client for nova is not installed. To install it " - "run `pip install python-novaclient`", e.message) - - def test_validate_with_admin(self): - validator = validators.RequiredClientsValidator(components=["keystone", - "nova"], - admin=True) - clients = self.context["admin"]["credential"].clients.return_value - result = validator.validate(self.context, self.config, None, None) - self.assertIsNone(result) - - clients.keystone.side_effect = ImportError - e = self.assertRaises( - validators.validation.ValidationError, - validator.validate, self.context, self.config, None, None) - self.assertEqual("Client for keystone is not installed. 
To install it " - "run `pip install python-keystoneclient`", e.message) - - -class RequiredServicesValidatorTestCase(test.TestCase): - - def setUp(self): - super(RequiredServicesValidatorTestCase, self).setUp() - self.validator = validators.RequiredServicesValidator([ - consts.Service.KEYSTONE, - consts.Service.NOVA, - consts.Service.NOVA_NET]) - self.config = config - self.context = context - - def test_validator(self): - - self.config["context"]["api_versions@openstack"].get = mock.Mock( - return_value={consts.Service.KEYSTONE: "service_type"}) - - clients = self.context["admin"].get("credential").clients() - - clients.services().values.return_value = [ - consts.Service.KEYSTONE, consts.Service.NOVA, - consts.Service.NOVA_NET] - fake_service = mock.Mock(binary="nova-network", status="enabled") - clients.nova.services.list.return_value = [fake_service] - result = self.validator.validate(self.context, self.config, - None, None) - self.assertIsNone(result) - - fake_service = mock.Mock(binary="keystone", status="enabled") - clients.nova.services.list.return_value = [fake_service] - result = self.validator.validate(self.context, self.config, - None, None) - self.assertIsNone(result) - - fake_service = mock.Mock(binary="nova-network", status="disabled") - clients.nova.services.list.return_value = [fake_service] - result = self.validator.validate(self.context, self.config, - None, None) - self.assertIsNone(result) - - def test_validator_wrong_service(self): - - self.config["context"]["api_versions@openstack"].get = mock.Mock( - return_value={consts.Service.KEYSTONE: "service_type", - consts.Service.NOVA: "service_name"}) - - clients = self.context["admin"].get("credential").clients() - clients.services().values.return_value = [ - consts.Service.KEYSTONE, consts.Service.NOVA] - - validator = validators.RequiredServicesValidator([ - consts.Service.KEYSTONE, - consts.Service.NOVA, "lol"]) - - e = self.assertRaises( - validators.validation.ValidationError, - validator.validate, self.context, {}, None, None) - expected_msg = ("'{0}' service is not available. Hint: If '{0}'" - " service has non-default service_type, try to setup" - " it via 'api_versions' context.").format("lol") - self.assertEqual(expected_msg, e.message) - - -@ddt.ddt -class ValidateHeatTemplateValidatorTestCase(test.TestCase): - - def setUp(self): - super(ValidateHeatTemplateValidatorTestCase, self).setUp() - self.validator = validators.ValidateHeatTemplateValidator( - "template_path1", "template_path2") - self.config = copy.deepcopy(config) - self.context = copy.deepcopy(context) - - @ddt.data( - {"exception_msg": "Heat template validation failed on fake_path1. 
" - "Original error message: fake_msg."}, - {"exception_msg": None} - ) - @ddt.unpack - @mock.patch("%s.os.path.exists" % PATH, - return_value=True) - @mock.patch("rally.plugins.openstack.validators.open", - side_effect=mock.mock_open(), create=True) - def test_validate(self, mock_open, mock_exists, exception_msg): - clients = self.context["users"][0]["credential"].clients() - mock_open().__enter__().read.side_effect = ["fake_template1", - "fake_template2"] - heat_validator = mock.MagicMock() - if exception_msg: - heat_validator.side_effect = Exception("fake_msg") - clients.heat().stacks.validate = heat_validator - context = {"args": {"template_path1": "fake_path1", - "template_path2": "fake_path2"}} - if not exception_msg: - result = self.validator.validate(self.context, context, None, None) - - heat_validator.assert_has_calls([ - mock.call(template="fake_template1"), - mock.call(template="fake_template2") - ]) - mock_open.assert_has_calls([ - mock.call("fake_path1", "r"), - mock.call("fake_path2", "r") - ], any_order=True) - self.assertIsNone(result) - else: - e = self.assertRaises( - validators.validation.ValidationError, - self.validator.validate, self.context, context, None, None) - heat_validator.assert_called_once_with( - template="fake_template1") - self.assertEqual( - "Heat template validation failed on fake_path1." - " Original error message: fake_msg.", e.message) - - def test_validate_missed_params(self): - validator = validators.ValidateHeatTemplateValidator( - params="fake_param") - - e = self.assertRaises( - validators.validation.ValidationError, - validator.validate, self.context, self.config, None, None) - - expected_msg = ("Path to heat template is not specified. Its needed " - "for heat template validation. Please check the " - "content of `fake_param` scenario argument.") - self.assertEqual(expected_msg, e.message) - - @mock.patch("%s.os.path.exists" % PATH, - return_value=False) - def test_validate_file_not_found(self, mock_exists): - config = {"args": {"template_path1": "fake_path1", - "template_path2": "fake_path2"}} - e = self.assertRaises( - validators.validation.ValidationError, - self.validator.validate, self.context, config, None, None) - expected_msg = "No file found by the given path fake_path1" - self.assertEqual(expected_msg, e.message) - - -class RequiredCinderServicesValidatorTestCase(test.TestCase): - - def setUp(self): - super(RequiredCinderServicesValidatorTestCase, self).setUp() - self.context = copy.deepcopy(context) - self.config = copy.deepcopy(config) - - def test_validate(self): - validator = validators.RequiredCinderServicesValidator( - "cinder_service") - - fake_service = mock.Mock(binary="cinder_service", state="up") - clients = self.context["admin"]["credential"].clients() - clients.cinder().services.list.return_value = [fake_service] - result = validator.validate(self.context, self.config, None, None) - self.assertIsNone(result) - - fake_service.state = "down" - e = self.assertRaises( - validators.validation.ValidationError, - validator.validate, self.context, self.config, None, None) - self.assertEqual("cinder_service service is not available", - e.message) - - -@ddt.ddt -class RequiredAPIVersionsValidatorTestCase(test.TestCase): - - def setUp(self): - super(RequiredAPIVersionsValidatorTestCase, self).setUp() - self.config = copy.deepcopy(config) - self.context = copy.deepcopy(context) - - def _get_keystone_v2_mock_client(self): - keystone = mock.Mock() - del keystone.projects - keystone.tenants = mock.Mock() - return keystone - - def 
_get_keystone_v3_mock_client(self): - keystone = mock.Mock() - del keystone.tenants - keystone.projects = mock.Mock() - return keystone - - def test_validate(self): - validator = validators.RequiredAPIVersionsValidator("keystone", - [2.0, 3]) - - clients = self.context["users"][0]["credential"].clients() - - clients.keystone.return_value = self._get_keystone_v3_mock_client() - validator.validate(self.context, self.config, None, None) - - clients.keystone.return_value = self._get_keystone_v2_mock_client() - validator.validate(self.context, self.config, None, None) - - def test_validate_with_keystone_v2(self): - validator = validators.RequiredAPIVersionsValidator("keystone", - [2.0]) - - clients = self.context["users"][0]["credential"].clients() - clients.keystone.return_value = self._get_keystone_v2_mock_client() - validator.validate(self.context, self.config, None, None) - - clients.keystone.return_value = self._get_keystone_v3_mock_client() - e = self.assertRaises( - validators.validation.ValidationError, - validator.validate, self.context, self.config, None, None) - self.assertEqual("Task was designed to be used with keystone V2.0, " - "but V3 is selected.", e.message) - - def test_validate_with_keystone_v3(self): - validator = validators.RequiredAPIVersionsValidator("keystone", - [3]) - - clients = self.context["users"][0]["credential"].clients() - clients.keystone.return_value = self._get_keystone_v3_mock_client() - validator.validate(self.context, self.config, None, None) - - clients.keystone.return_value = self._get_keystone_v2_mock_client() - e = self.assertRaises( - validators.validation.ValidationError, - validator.validate, self.context, self.config, None, None) - self.assertEqual("Task was designed to be used with keystone V3, " - "but V2.0 is selected.", e.message) - - @ddt.unpack - @ddt.data( - {"nova": 2, "versions": [2], "err_msg": None}, - {"nova": 3, "versions": [2], - "err_msg": "Task was designed to be used with nova V2, " - "but V3 is selected."}, - {"nova": None, "versions": [2], - "err_msg": "Unable to determine the API version."}, - {"nova": 2, "versions": [2, 3], "err_msg": None}, - {"nova": 4, "versions": [2, 3], - "err_msg": "Task was designed to be used with nova V2, 3, " - "but V4 is selected."} - ) - def test_validate_nova(self, nova, versions, err_msg): - validator = validators.RequiredAPIVersionsValidator("nova", - versions) - - clients = self.context["users"][0]["credential"].clients() - - clients.nova.choose_version.return_value = nova - config = {"contexts": {"api_versions@openstack": {}}} - - if err_msg: - e = self.assertRaises( - validators.validation.ValidationError, - validator.validate, self.context, config, None, None) - self.assertEqual(err_msg, e.message) - else: - result = validator.validate(self.context, config, None, None) - self.assertIsNone(result) - - @ddt.unpack - @ddt.data({"version": 2, "err_msg": None}, - {"version": 3, "err_msg": "Task was designed to be used with " - "nova V3, but V2 is selected."}) - def test_validate_context(self, version, err_msg): - validator = validators.RequiredAPIVersionsValidator("nova", - [version]) - - config = { - "contexts": {"api_versions@openstack": {"nova": {"version": 2}}}} - - if err_msg: - e = self.assertRaises( - validators.validation.ValidationError, - validator.validate, self.context, config, None, None) - self.assertEqual(err_msg, e.message) - else: - result = validator.validate(self.context, config, None, None) - self.assertIsNone(result) - - -class 
VolumeTypeExistsValidatorTestCase(test.TestCase): - - def setUp(self): - super(VolumeTypeExistsValidatorTestCase, self).setUp() - self.validator = validators.VolumeTypeExistsValidator("volume_type", - True) - self.config = copy.deepcopy(config) - self.context = copy.deepcopy(context) - - def test_validator_without_ctx(self): - validator = validators.VolumeTypeExistsValidator("fake_param", - nullable=True) - - clients = self.context["users"][0]["credential"].clients() - - clients.cinder().volume_types.list.return_value = [mock.MagicMock()] - - result = validator.validate(self.context, self.config, None, None) - self.assertIsNone(result, "Unexpected result") - - def test_validator_without_ctx_failed(self): - validator = validators.VolumeTypeExistsValidator("fake_param", - nullable=False) - - clients = self.context["users"][0]["credential"].clients() - - clients.cinder().volume_types.list.return_value = [mock.MagicMock()] - - e = self.assertRaises( - validators.validation.ValidationError, - validator.validate, self.context, self.config, None, None) - self.assertEqual( - "The parameter 'fake_param' is required and should not be empty.", - e.message) - - def test_validate_with_ctx(self): - clients = self.context["users"][0]["credential"].clients() - clients.cinder().volume_types.list.return_value = [] - ctx = {"args": {"volume_type": "fake_type"}, - "contexts": {"volume_types": ["fake_type"]}} - result = self.validator.validate(self.context, ctx, None, None) - - self.assertIsNone(result) - - def test_validate_with_ctx_failed(self): - clients = self.context["users"][0]["credential"].clients() - clients.cinder().volume_types.list.return_value = [] - config = {"args": {"volume_type": "fake_type"}, - "contexts": {"volume_types": ["fake_type_2"]}} - e = self.assertRaises( - validators.validation.ValidationError, - self.validator.validate, self.context, config, None, None) - - err_msg = ("Specified volume type fake_type not found for user {}. 
" - "List of available types: ['fake_type_2']") - fake_user = self.context["users"][0] - self.assertEqual(err_msg.format(fake_user), e.message) - - -@ddt.ddt -class WorkbookContainsWorkflowValidatorTestCase(test.TestCase): - - @mock.patch("rally.common.yamlutils.safe_load") - @mock.patch("%s.os.access" % PATH) - @mock.patch("%s.open" % PATH) - def test_validator(self, mock_open, mock_access, mock_safe_load): - mock_safe_load.return_value = { - "version": "2.0", - "name": "wb", - "workflows": { - "wf1": { - "type": "direct", - "tasks": { - "t1": { - "action": "std.noop" - } - } - } - } - } - validator = validators.WorkbookContainsWorkflowValidator( - workbook_param="definition", workflow_param="workflow_name") - - config = { - "args": { - "definition": "fake_path1", - "workflow_name": "wf1" - } - } - - result = validator.validate(None, config, None, None) - self.assertIsNone(result) - - self.assertEqual(1, mock_open.called) - self.assertEqual(1, mock_access.called) - self.assertEqual(1, mock_safe_load.called) diff --git a/tests/unit/plugins/openstack/verification/__init__.py b/tests/unit/plugins/openstack/verification/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/verification/tempest/__init__.py b/tests/unit/plugins/openstack/verification/tempest/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/verification/tempest/test_config.py b/tests/unit/plugins/openstack/verification/tempest/test_config.py deleted file mode 100644 index f08ed441fe..0000000000 --- a/tests/unit/plugins/openstack/verification/tempest/test_config.py +++ /dev/null @@ -1,278 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ddt -import mock - -from rally.common import cfg -from rally.plugins.openstack import osclients -from rally.plugins.openstack.verification.tempest import config -from tests.unit import fakes -from tests.unit import test - - -CONF = cfg.CONF - - -CRED = { - "username": "admin", - "tenant_name": "admin", - "password": "admin-12345", - "auth_url": "http://test:5000/v2.0/", - "permission": "admin", - "region_name": "test", - "https_insecure": False, - "https_cacert": "/path/to/cacert/file", - "user_domain_name": "admin", - "project_domain_name": "admin" -} - -PATH = "rally.plugins.openstack.verification.tempest.config" - - -@ddt.ddt -class TempestConfigfileManagerTestCase(test.TestCase): - - def setUp(self): - super(TempestConfigfileManagerTestCase, self).setUp() - deployment = fakes.FakeDeployment(uuid="fake_deployment", - admin=fakes.fake_credential(**CRED)) - self.tempest = config.TempestConfigfileManager(deployment) - - def test__configure_auth(self): - self.tempest.conf.add_section("auth") - self.tempest._configure_auth() - - expected = ( - ("admin_username", CRED["username"]), - ("admin_password", CRED["password"]), - ("admin_project_name", CRED["tenant_name"]), - ("admin_domain_name", CRED["user_domain_name"])) - result = self.tempest.conf.items("auth") - for item in expected: - self.assertIn(item, result) - - @ddt.data("data_processing", "data-processing") - def test__configure_data_processing(self, service_type): - self.tempest.available_services = ["sahara"] - - self.tempest.clients.services.return_value = { - service_type: "sahara"} - self.tempest.conf.add_section("data-processing") - self.tempest._configure_data_processing() - self.assertEqual(service_type, - self.tempest.conf.get("data-processing", - "catalog_type")) - - @ddt.data( - # The prefix "ex_" is abbreviation of "expected" - # case #1: both versions are discoverable; version is in the auth_url - {"auth_url": "http://example.com/v2.0", - "data": [{"version": (3, 0), "url": "foo3.com"}, - {"version": (2, 0), "url": "foo2.com"}], - "ex_uri": "http://example.com/v2.0", "ex_auth_version": "v2", - "ex_uri_v3": "http://example.com/v3"}, - # case #2: the same case, but v3 is in the url - {"auth_url": "http://example.com/v3", - "data": [{"version": (3, 0), "url": "foo3.com"}, - {"version": (2, 0), "url": "foo2.com"}], - "ex_uri": "http://example.com/v2.0", "ex_auth_version": "v3", - "ex_uri_v3": "http://example.com/v3"}, - # case #3: both versions are discoverable; version is not in auth_url - {"auth_url": "http://example.com", - "data": [{"version": (3, 0), "url": "foo3.com"}, - {"version": (2, 0), "url": "foo2.com"}], - "ex_uri": "foo2.com", "ex_uri_v3": "foo3.com", - "ex_auth_version": "v3"}, - # case #4: the same case, but data in the another sort. 
- {"auth_url": "http://example.com", - "data": [{"version": (2, 0), "url": "foo2.com"}, - {"version": (3, 0), "url": "foo3.com"}], - "ex_uri": "foo2.com", "ex_uri_v3": "foo3.com", - "ex_auth_version": "v3"}, - # case #5: only one version is discoverable; - {"auth_url": "http://example.com", - "data": [{"version": (2, 0), "url": "foo2.com"}], - "ex_uri": "foo2.com", "ex_auth_version": "v2", - "ex_uri_v3": "http://example.com/v3"}, - # case #6: the same case, but keystone v3 is discoverable - {"auth_url": "http://example.com", - "data": [{"version": (3, 0), "url": "foo3.com"}], - "ex_uri": "http://example.com/v2.0", "ex_auth_version": "v3", - "ex_uri_v3": "foo3.com", - "ex_v2_off": True} - ) - @ddt.unpack - def test__configure_identity(self, auth_url, data, ex_uri, - ex_uri_v3, ex_auth_version, ex_v2_off=False): - self.tempest.conf.add_section("identity") - self.tempest.conf.add_section("identity-feature-enabled") - self.tempest.credential.auth_url = auth_url - process_url = osclients.Keystone( - self.tempest.credential, 0, 0)._remove_url_version - self.tempest.clients.keystone._remove_url_version = process_url - - from keystoneauth1 import discover - from keystoneauth1 import session - - with mock.patch.object(discover, "Discover") as mock_discover: - with mock.patch.object(session, "Session") as mock_session: - mock_discover.return_value.version_data.return_value = data - - self.tempest._configure_identity() - - mock_discover.assert_called_once_with( - mock_session.return_value, auth_url) - - expected = {"region": CRED["region_name"], - "auth_version": ex_auth_version, - "uri": ex_uri, "uri_v3": ex_uri_v3, - "disable_ssl_certificate_validation": str( - CRED["https_insecure"]), - "ca_certificates_file": CRED["https_cacert"]} - self.assertEqual(expected, dict(self.tempest.conf.items("identity"))) - if ex_v2_off: - self.assertEqual( - "False", - self.tempest.conf.get("identity-feature-enabled", "api_v2")) - - def test__configure_network_if_neutron(self): - self.tempest.available_services = ["neutron"] - client = self.tempest.clients.neutron() - client.list_networks.return_value = { - "networks": [ - { - "status": "ACTIVE", - "id": "test_id", - "name": "test_name", - "router:external": True - } - ] - } - - self.tempest.conf.add_section("network") - self.tempest._configure_network() - self.assertEqual("test_id", - self.tempest.conf.get("network", "public_network_id")) - self.assertEqual("test_name", - self.tempest.conf.get("network", - "floating_network_name")) - - def test__configure_network_if_nova(self): - self.tempest.available_services = ["nova"] - client = self.tempest.clients.nova() - client.networks.list.return_value = [ - mock.MagicMock(human_id="fake-network")] - - self.tempest.conf.add_section("compute") - self.tempest.conf.add_section("validation") - self.tempest._configure_network() - - expected = {"compute": ("fixed_network_name", "fake-network"), - "validation": ("network_for_ssh", "fake-network")} - for section, option in expected.items(): - result = self.tempest.conf.items(section) - self.assertIn(option, result) - - def test__configure_network_feature_enabled(self): - self.tempest.available_services = ["neutron"] - client = self.tempest.clients.neutron() - client.list_ext.return_value = { - "extensions": [ - {"alias": "dvr"}, - {"alias": "extra_dhcp_opt"}, - {"alias": "extraroute"} - ] - } - - self.tempest.conf.add_section("network-feature-enabled") - self.tempest._configure_network_feature_enabled() - client.list_ext.assert_called_once_with("extensions", "/extensions", - 
retrieve_all=True) - self.assertEqual("dvr,extra_dhcp_opt,extraroute", - self.tempest.conf.get("network-feature-enabled", - "api_extensions")) - - def test__configure_object_storage(self): - self.tempest.conf.add_section("object-storage") - self.tempest._configure_object_storage() - - expected = ( - ("operator_role", CONF.openstack.swift_operator_role), - ("reseller_admin_role", CONF.openstack.swift_reseller_admin_role)) - result = self.tempest.conf.items("object-storage") - for item in expected: - self.assertIn(item, result) - - def test__configure_orchestration(self): - self.tempest.conf.add_section("orchestration") - self.tempest._configure_orchestration() - - expected = ( - ("stack_owner_role", CONF.openstack.heat_stack_owner_role), - ("stack_user_role", CONF.openstack.heat_stack_user_role)) - result = self.tempest.conf.items("orchestration") - for item in expected: - self.assertIn(item, result) - - def test__configure_service_available(self): - available_services = ("nova", "cinder", "glance", "sahara") - self.tempest.available_services = available_services - self.tempest.conf.add_section("service_available") - self.tempest._configure_service_available() - - expected = ( - ("neutron", "False"), ("heat", "False"), ("nova", "True"), - ("swift", "False"), ("cinder", "True"), ("sahara", "True"), - ("glance", "True")) - result = self.tempest.conf.items("service_available") - for item in expected: - self.assertIn(item, result) - - @ddt.data({}, {"service": "neutron", "connect_method": "floating"}) - @ddt.unpack - def test__configure_validation(self, service="nova", - connect_method="fixed"): - self.tempest.available_services = [service] - self.tempest.conf.add_section("validation") - self.tempest._configure_validation() - - expected = (("connect_method", connect_method), ) - result = self.tempest.conf.items("validation") - for item in expected: - self.assertIn(item, result) - - @mock.patch("%s.six.StringIO" % PATH) - @mock.patch("six.moves.builtins.open", side_effect=mock.mock_open()) - @mock.patch("inspect.getmembers") - def test_create(self, mock_inspect_getmembers, mock_open, mock_string_io): - configure_something_method = mock.MagicMock() - mock_inspect_getmembers.return_value = [("_configure_something", - configure_something_method)] - self.tempest.conf.read = mock.Mock() - self.tempest.conf.write = mock.Mock() - self.tempest.conf.read.return_value = "[section]\noption = value" - - fake_extra_conf = {"section2": {"option2": "value2"}} - self.tempest.create("/path/to/fake/conf", fake_extra_conf) - - self.assertEqual(1, configure_something_method.call_count) - self.assertIn(("option2", "value2"), - self.tempest.conf.items("section2")) - mock_open.assert_called_once_with("/path/to/fake/conf", "w") - self.tempest.conf.write.assert_has_calls( - [mock.call(mock_open.side_effect()), - mock.call(mock_string_io.return_value)]) - mock_string_io.return_value.getvalue.assert_called_once_with() diff --git a/tests/unit/plugins/openstack/verification/tempest/test_context.py b/tests/unit/plugins/openstack/verification/tempest/test_context.py deleted file mode 100644 index cc76790173..0000000000 --- a/tests/unit/plugins/openstack/verification/tempest/test_context.py +++ /dev/null @@ -1,413 +0,0 @@ -# Copyright 2017: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -import ddt -import mock -import requests - -from rally.common import cfg -from rally import exceptions -from rally.plugins.openstack.verification.tempest import config -from rally.plugins.openstack.verification.tempest import context -from tests.unit import fakes -from tests.unit import test - - -CONF = cfg.CONF - - -CRED = { - "username": "admin", - "tenant_name": "admin", - "password": "admin-12345", - "auth_url": "http://test:5000/v2.0/", - "permission": "admin", - "region_name": "test", - "https_insecure": False, - "https_cacert": "/path/to/cacert/file", - "user_domain_name": "admin", - "project_domain_name": "admin" -} - -PATH = "rally.plugins.openstack.verification.tempest.context" - - -@ddt.ddt -class TempestContextTestCase(test.TestCase): - - def setUp(self): - super(TempestContextTestCase, self).setUp() - - self.mock_isfile = mock.patch("os.path.isfile", - return_value=True).start() - - self.cred = fakes.fake_credential(**CRED) - self.deployment = fakes.FakeDeployment( - uuid="fake_deployment", admin=self.cred) - cfg = {"verifier": mock.Mock(deployment=self.deployment), - "verification": {"uuid": "uuid"}} - cfg["verifier"].manager.home_dir = "/p/a/t/h" - cfg["verifier"].manager.configfile = "/fake/path/to/config" - self.context = context.TempestContext(cfg) - self.context.conf.add_section("compute") - self.context.conf.add_section("orchestration") - self.context.conf.add_section("scenario") - - @mock.patch("six.moves.builtins.open", side_effect=mock.mock_open(), - create=True) - def test__download_image_from_glance(self, mock_open): - self.mock_isfile.return_value = False - img_path = os.path.join(self.context.data_dir, "foo") - img = mock.MagicMock() - glanceclient = self.context.clients.glance() - glanceclient.images.data.return_value = "data" - - self.context._download_image_from_source(img_path, img) - mock_open.assert_called_once_with(img_path, "wb") - glanceclient.images.data.assert_called_once_with(img.id) - mock_open().write.assert_has_calls([mock.call("d"), - mock.call("a"), - mock.call("t"), - mock.call("a")]) - - @mock.patch("six.moves.builtins.open", side_effect=mock.mock_open()) - @mock.patch("requests.get", return_value=mock.MagicMock(status_code=200)) - def test__download_image_from_url_success(self, mock_get, mock_open): - self.mock_isfile.return_value = False - img_path = os.path.join(self.context.data_dir, "foo") - mock_get.return_value.iter_content.return_value = "data" - - self.context._download_image_from_source(img_path) - mock_get.assert_called_once_with(CONF.openstack.img_url, stream=True) - mock_open.assert_called_once_with(img_path, "wb") - mock_open().write.assert_has_calls([mock.call("d"), - mock.call("a"), - mock.call("t"), - mock.call("a")]) - - @mock.patch("requests.get") - @ddt.data(404, 500) - def test__download_image_from_url_failure(self, status_code, mock_get): - self.mock_isfile.return_value = False - mock_get.return_value = mock.MagicMock(status_code=status_code) - self.assertRaises(exceptions.RallyException, - self.context._download_image_from_source, - os.path.join(self.context.data_dir, "foo")) - - 
@mock.patch("requests.get", side_effect=requests.ConnectionError()) - def test__download_image_from_url_connection_error( - self, mock_requests_get): - self.mock_isfile.return_value = False - self.assertRaises(exceptions.RallyException, - self.context._download_image_from_source, - os.path.join(self.context.data_dir, "foo")) - - @mock.patch("rally.plugins.openstack.wrappers." - "network.NeutronWrapper.create_network") - @mock.patch("six.moves.builtins.open", side_effect=mock.mock_open()) - def test_options_configured_manually( - self, mock_open, mock_neutron_wrapper_create_network): - self.context.available_services = ["glance", "heat", "nova", "neutron"] - - self.context.conf.set("compute", "image_ref", "id1") - self.context.conf.set("compute", "image_ref_alt", "id2") - self.context.conf.set("compute", "flavor_ref", "id3") - self.context.conf.set("compute", "flavor_ref_alt", "id4") - self.context.conf.set("compute", "fixed_network_name", "name1") - self.context.conf.set("orchestration", "instance_type", "id5") - self.context.conf.set("scenario", "img_file", "id6") - - self.context.__enter__() - - glanceclient = self.context.clients.glance() - novaclient = self.context.clients.nova() - - self.assertEqual(0, glanceclient.images.create.call_count) - self.assertEqual(0, novaclient.flavors.create.call_count) - self.assertEqual(0, mock_neutron_wrapper_create_network.call_count) - - def test__create_tempest_roles(self): - role1 = CONF.openstack.swift_operator_role - role2 = CONF.openstack.swift_reseller_admin_role - role3 = CONF.openstack.heat_stack_owner_role - role4 = CONF.openstack.heat_stack_user_role - - client = self.context.clients.verified_keystone() - client.roles.list.return_value = [fakes.FakeRole(name=role1), - fakes.FakeRole(name=role2)] - client.roles.create.side_effect = [fakes.FakeFlavor(name=role3), - fakes.FakeFlavor(name=role4)] - - self.context._create_tempest_roles() - self.assertEqual(2, client.roles.create.call_count) - - created_roles = [role.name for role in self.context._created_roles] - self.assertIn(role3, created_roles) - self.assertIn(role4, created_roles) - - @mock.patch("rally.plugins.openstack.services.image.image.Image") - def test__discover_image(self, mock_image): - client = mock_image.return_value - client.list_images.return_value = [fakes.FakeImage(name="Foo"), - fakes.FakeImage(name="CirrOS")] - - image = self.context._discover_image() - self.assertEqual("CirrOS", image.name) - - @mock.patch("six.moves.builtins.open", side_effect=mock.mock_open(), - create=True) - @mock.patch("rally.plugins.openstack.services.image.image.Image") - @mock.patch("os.path.isfile", return_value=False) - def test__download_image(self, mock_isfile, mock_image, mock_open): - img_1 = mock.MagicMock() - img_1.name = "Foo" - img_2 = mock.MagicMock() - img_2.name = "CirrOS" - glanceclient = self.context.clients.glance() - glanceclient.images.data.return_value = "data" - mock_image.return_value.list_images.return_value = [img_1, img_2] - - self.context._download_image() - img_path = os.path.join(self.context.data_dir, self.context.image_name) - mock_image.return_value.list_images.assert_called_once_with( - status="active", visibility="public") - glanceclient.images.data.assert_called_once_with(img_2.id) - mock_open.assert_called_once_with(img_path, "wb") - mock_open().write.assert_has_calls([mock.call("d"), - mock.call("a"), - mock.call("t"), - mock.call("a")]) - - # We can choose any option to test the '_configure_option' method. So let's - # configure the 'flavor_ref' option. 
- def test__configure_option(self): - helper_method = mock.MagicMock() - helper_method.side_effect = [fakes.FakeFlavor(id="id1")] - - self.context.conf.set("compute", "flavor_ref", "") - self.context._configure_option("compute", "flavor_ref", - helper_method=helper_method, flv_ram=64) - self.assertEqual(1, helper_method.call_count) - - result = self.context.conf.get("compute", "flavor_ref") - self.assertEqual("id1", result) - - @mock.patch("rally.plugins.openstack.services.image.image.Image") - def test__discover_or_create_image_when_image_exists(self, mock_image): - client = mock_image.return_value - client.list_images.return_value = [fakes.FakeImage(name="CirrOS")] - - image = self.context._discover_or_create_image() - self.assertEqual("CirrOS", image.name) - self.assertEqual(0, client.create_image.call_count) - self.assertEqual(0, len(self.context._created_images)) - - @mock.patch("rally.plugins.openstack.services.image.image.Image") - def test__discover_or_create_image(self, mock_image): - client = mock_image.return_value - - image = self.context._discover_or_create_image() - self.assertEqual(image, mock_image().create_image.return_value) - self.assertEqual(self.context._created_images[0], - client.create_image.return_value) - params = {"container_format": CONF.openstack.img_container_format, - "image_location": mock.ANY, - "disk_format": CONF.openstack.img_disk_format, - "image_name": mock.ANY, - "visibility": "public"} - client.create_image.assert_called_once_with(**params) - - def test__discover_or_create_flavor_when_flavor_exists(self): - client = self.context.clients.nova() - client.flavors.list.return_value = [fakes.FakeFlavor(id="id1", ram=64, - vcpus=1, disk=0)] - - flavor = self.context._discover_or_create_flavor(64) - self.assertEqual("id1", flavor.id) - self.assertEqual(0, len(self.context._created_flavors)) - - def test__discover_or_create_flavor(self): - client = self.context.clients.nova() - client.flavors.list.return_value = [] - client.flavors.create.side_effect = [fakes.FakeFlavor(id="id1")] - - flavor = self.context._discover_or_create_flavor(64) - self.assertEqual("id1", flavor.id) - self.assertEqual("id1", self.context._created_flavors[0].id) - - def test__create_network_resources(self): - client = self.context.clients.neutron() - fake_network = { - "id": "nid1", - "name": "network", - "status": "status"} - - client.create_network.side_effect = [{"network": fake_network}] - client.create_router.side_effect = [{"router": {"id": "rid1"}}] - client.create_subnet.side_effect = [{"subnet": {"id": "subid1"}}] - client.list_networks.return_value = {"networks": []} - - network = self.context._create_network_resources() - self.assertEqual("nid1", network["id"]) - self.assertEqual("nid1", self.context._created_networks[0]["id"]) - self.assertEqual("rid1", - self.context._created_networks[0]["router_id"]) - self.assertEqual("subid1", - self.context._created_networks[0]["subnets"][0]) - - def test__cleanup_tempest_roles(self): - self.context._created_roles = [fakes.FakeRole(), fakes.FakeRole()] - - self.context._cleanup_tempest_roles() - client = self.context.clients.keystone() - self.assertEqual(2, client.roles.delete.call_count) - - @mock.patch("rally.plugins.openstack.services.image.image.Image") - def test__cleanup_images(self, mock_image): - self.context._created_images = [fakes.FakeImage(id="id1"), - fakes.FakeImage(id="id2")] - - self.context.conf.set("compute", "image_ref", "id1") - self.context.conf.set("compute", "image_ref_alt", "id2") - - image_service = 
mock_image.return_value - image_service.get_image.side_effect = [ - fakes.FakeImage(id="id1", status="DELETED"), - fakes.FakeImage(id="id2"), - fakes.FakeImage(id="id2", status="DELETED")] - - self.context._cleanup_images() - client = self.context.clients.glance() - client.images.delete.assert_has_calls([mock.call("id1"), - mock.call("id2")]) - - self.assertEqual("", self.context.conf.get("compute", "image_ref")) - self.assertEqual("", self.context.conf.get("compute", "image_ref_alt")) - - def test__cleanup_flavors(self): - self.context._created_flavors = [fakes.FakeFlavor(id="id1"), - fakes.FakeFlavor(id="id2"), - fakes.FakeFlavor(id="id3")] - - self.context.conf.set("compute", "flavor_ref", "id1") - self.context.conf.set("compute", "flavor_ref_alt", "id2") - self.context.conf.set("orchestration", "instance_type", "id3") - - self.context._cleanup_flavors() - client = self.context.clients.nova() - self.assertEqual(3, client.flavors.delete.call_count) - - self.assertEqual("", self.context.conf.get("compute", "flavor_ref")) - self.assertEqual("", self.context.conf.get("compute", - "flavor_ref_alt")) - self.assertEqual("", self.context.conf.get("orchestration", - "instance_type")) - - @mock.patch("rally.plugins.openstack.wrappers." - "network.NeutronWrapper.delete_network") - def test__cleanup_network_resources( - self, mock_neutron_wrapper_delete_network): - self.context._created_networks = [{"name": "net-12345"}] - self.context.conf.set("compute", "fixed_network_name", "net-12345") - - self.context._cleanup_network_resources() - self.assertEqual(1, mock_neutron_wrapper_delete_network.call_count) - self.assertEqual("", self.context.conf.get("compute", - "fixed_network_name")) - - @mock.patch("six.moves.builtins.open", side_effect=mock.mock_open()) - @mock.patch("%s.TempestContext._configure_option" % PATH) - @mock.patch("%s.TempestContext._create_tempest_roles" % PATH) - @mock.patch("rally.verification.utils.create_dir") - def test_setup(self, mock_create_dir, - mock__create_tempest_roles, mock__configure_option, - mock_open): - verifier = mock.Mock(deployment=self.deployment) - verifier.manager.home_dir = "/p/a/t/h" - - # case #1: no neutron and heat - self.cred.clients.return_value.services.return_value = {} - - ctx = context.TempestContext({"verifier": verifier}) - ctx.conf = mock.Mock() - ctx.setup() - - ctx.conf.read.assert_called_once_with(verifier.manager.configfile) - mock_create_dir.assert_called_once_with(ctx.data_dir) - mock__create_tempest_roles.assert_called_once_with() - mock_open.assert_called_once_with(verifier.manager.configfile, "w") - ctx.conf.write(mock_open.side_effect()) - self.assertEqual( - [mock.call("DEFAULT", "log_file", "/p/a/t/h/tempest.log"), - mock.call("oslo_concurrency", "lock_path", "/p/a/t/h/lock_files"), - mock.call("scenario", "img_dir", "/p/a/t/h"), - mock.call("scenario", "img_file", ctx.image_name, - helper_method=ctx._download_image), - mock.call("compute", "image_ref", - helper_method=ctx._discover_or_create_image), - mock.call("compute", "image_ref_alt", - helper_method=ctx._discover_or_create_image), - mock.call("compute", "flavor_ref", - helper_method=ctx._discover_or_create_flavor, - flv_ram=config.CONF.openstack.flavor_ref_ram), - mock.call("compute", "flavor_ref_alt", - helper_method=ctx._discover_or_create_flavor, - flv_ram=config.CONF.openstack.flavor_ref_alt_ram)], - mock__configure_option.call_args_list) - - mock_create_dir.reset_mock() - mock__create_tempest_roles.reset_mock() - mock_open.reset_mock() - mock__configure_option.reset_mock() 
- - # case #2: neutron and heat are presented - self.cred.clients.return_value.services.return_value = { - "network": "neutron", "orchestration": "heat"} - - ctx = context.TempestContext({"verifier": verifier}) - neutron = ctx.clients.neutron() - neutron.list_networks.return_value = {"networks": ["fake_net"]} - ctx.conf = mock.Mock() - ctx.setup() - - ctx.conf.read.assert_called_once_with(verifier.manager.configfile) - mock_create_dir.assert_called_once_with(ctx.data_dir) - mock__create_tempest_roles.assert_called_once_with() - mock_open.assert_called_once_with(verifier.manager.configfile, "w") - ctx.conf.write(mock_open.side_effect()) - self.assertEqual( - [mock.call("DEFAULT", "log_file", "/p/a/t/h/tempest.log"), - mock.call("oslo_concurrency", "lock_path", "/p/a/t/h/lock_files"), - mock.call("scenario", "img_dir", "/p/a/t/h"), - mock.call("scenario", "img_file", ctx.image_name, - helper_method=ctx._download_image), - mock.call("compute", "image_ref", - helper_method=ctx._discover_or_create_image), - mock.call("compute", "image_ref_alt", - helper_method=ctx._discover_or_create_image), - mock.call("compute", "flavor_ref", - helper_method=ctx._discover_or_create_flavor, - flv_ram=config.CONF.openstack.flavor_ref_ram), - mock.call("compute", "flavor_ref_alt", - helper_method=ctx._discover_or_create_flavor, - flv_ram=config.CONF.openstack.flavor_ref_alt_ram), - mock.call("compute", "fixed_network_name", - helper_method=ctx._create_network_resources), - mock.call("orchestration", "instance_type", - helper_method=ctx._discover_or_create_flavor, - flv_ram=config.CONF.openstack.heat_instance_type_ram)], - mock__configure_option.call_args_list) diff --git a/tests/unit/plugins/openstack/verification/tempest/test_manager.py b/tests/unit/plugins/openstack/verification/tempest/test_manager.py deleted file mode 100644 index a680ae76b4..0000000000 --- a/tests/unit/plugins/openstack/verification/tempest/test_manager.py +++ /dev/null @@ -1,243 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import json -import os -import subprocess - -import mock - -from rally import exceptions -from rally.plugins.openstack.verification.tempest import manager -from tests.unit import test - - -PATH = "rally.plugins.openstack.verification.tempest.manager" - - -class TempestManagerTestCase(test.TestCase): - - def test_run_environ_property(self): - mock.patch("%s.testr.TestrLauncher.run_environ" % PATH, - new={"some": "key"}).start() - tempest = manager.TempestManager(mock.MagicMock(uuid="uuuiiiddd")) - env = {"some": "key", - "OS_TEST_PATH": os.path.join(tempest.repo_dir, - "tempest/test_discover"), - "TEMPEST_CONFIG": "tempest.conf", - "TEMPEST_CONFIG_DIR": os.path.dirname(tempest.configfile)} - - self.assertEqual(env, tempest.run_environ) - - def test_configfile_property(self): - tempest = manager.TempestManager(mock.MagicMock(uuid="uuuiiiddd")) - self.assertEqual(os.path.join(tempest.home_dir, "tempest.conf"), - tempest.configfile) - - @mock.patch("six.moves.builtins.open", side_effect=mock.mock_open()) - def test_get_configuration(self, mock_open): - tempest = manager.TempestManager(mock.MagicMock(uuid="uuuiiiddd")) - tempest.get_configuration() - - mock_open.assert_called_once_with(tempest.configfile) - mock_open.side_effect().read.assert_called_once_with() - - @mock.patch("%s.config.TempestConfigfileManager" % PATH) - def test_configure(self, mock_tempest_configfile_manager): - tempest = manager.TempestManager(mock.MagicMock(uuid="uuuiiiddd")) - cm = mock_tempest_configfile_manager.return_value - extra_options = mock.Mock() - - self.assertEqual(cm.create.return_value, - tempest.configure(extra_options)) - mock_tempest_configfile_manager.assert_called_once_with( - tempest.verifier.deployment) - cm.create.assert_called_once_with(tempest.configfile, extra_options) - - @mock.patch("%s.config.os.path.exists" % PATH) - def test_is_configured(self, mock_exists): - tempest = manager.TempestManager(mock.MagicMock(uuid="uuuiiiddd")) - self.assertTrue(tempest.is_configured()) - - @mock.patch("rally.verification.utils.extend_configfile") - def test_extend_configuration(self, mock_extend_configfile): - tempest = manager.TempestManager(mock.MagicMock(uuid="uuuiiiddd")) - extra_options = mock.Mock() - self.assertEqual(mock_extend_configfile.return_value, - tempest.extend_configuration(extra_options)) - mock_extend_configfile.assert_called_once_with(extra_options, - tempest.configfile) - - @mock.patch("six.moves.builtins.open", side_effect=mock.mock_open()) - def test_override_configuration(self, mock_open): - tempest = manager.TempestManager(mock.MagicMock(uuid="uuuiiiddd")) - new_content = mock.Mock() - - tempest.override_configuration(new_content) - - mock_open.assert_called_once_with(tempest.configfile, "w") - mock_open.side_effect().write.assert_called_once_with(new_content) - - @mock.patch("%s.os.path.exists" % PATH) - @mock.patch("%s.utils.check_output" % PATH) - @mock.patch("%s.TempestManager.check_system_wide" % PATH) - def test_install_extension(self, mock_check_system_wide, mock_check_output, - mock_exists): - tempest = manager.TempestManager(mock.MagicMock(uuid="uuuiiiddd", - system_wide=True)) - e = self.assertRaises(NotImplementedError, tempest.install_extension, - None, None, {"key": "value"}) - self.assertIn("verifiers don't support extra installation settings", - "%s" % e) - - test_reqs_path = os.path.join(tempest.base_dir, "extensions", - "example", "test-requirements.txt") - - # case #1 system-wide installation - source = "https://github.com/example/example" - 
tempest.install_extension(source) - - path = os.path.join(tempest.base_dir, "extensions") - mock_check_output.assert_called_once_with( - ["pip", "install", "--no-deps", "--src", path, "-e", - "git+https://github.com/example/example@master#egg=example"], - cwd=tempest.base_dir, env=tempest.environ) - mock_check_system_wide.assert_called_once_with( - reqs_file_path=test_reqs_path) - - mock_check_output.reset_mock() - - # case #2 virtual env with specified version - tempest.verifier.system_wide = False - version = "some" - tempest.install_extension(source, version=version) - - self.assertEqual([ - mock.call([ - "pip", "install", "--src", path, "-e", - "git+https://github.com/example/example@some#egg=example"], - cwd=tempest.base_dir, env=tempest.environ), - mock.call(["pip", "install", "-r", test_reqs_path], - cwd=tempest.base_dir, env=tempest.environ)], - mock_check_output.call_args_list) - - @mock.patch("%s.utils.check_output" % PATH) - def test_list_extensions(self, mock_check_output): - plugins_list = [ - {"name": "some", "entry_point": "foo.bar", "location": "/tmp"}, - {"name": "another", "entry_point": "bar.foo", "location": "/tmp"} - ] - mock_check_output.return_value = json.dumps(plugins_list) - - tempest = manager.TempestManager(mock.MagicMock(uuid="uuuiiiddd")) - - self.assertEqual(plugins_list, tempest.list_extensions()) - self.assertEqual(1, mock_check_output.call_count) - mock_check_output.reset_mock() - - mock_check_output.side_effect = subprocess.CalledProcessError("", "") - self.assertRaises(exceptions.RallyException, tempest.list_extensions) - self.assertEqual(1, mock_check_output.call_count) - - @mock.patch("%s.TempestManager.list_extensions" % PATH) - @mock.patch("%s.os.path.exists" % PATH) - @mock.patch("%s.shutil.rmtree" % PATH) - def test_uninstall_extension(self, mock_rmtree, mock_exists, - mock_list_extensions): - plugins_list = [ - {"name": "some", "entry_point": "foo.bar", "location": "/tmp"}, - {"name": "another", "entry_point": "bar.foo", "location": "/tmp"} - ] - mock_list_extensions.return_value = plugins_list - - tempest = manager.TempestManager(mock.MagicMock(uuid="uuuiiiddd")) - - tempest.uninstall_extension("some") - mock_rmtree.assert_called_once_with(plugins_list[0]["location"]) - mock_list_extensions.assert_called_once_with() - - mock_rmtree.reset_mock() - mock_list_extensions.reset_mock() - - self.assertRaises(exceptions.RallyException, - tempest.uninstall_extension, "unexist") - - mock_list_extensions.assert_called_once_with() - self.assertFalse(mock_rmtree.called) - - @mock.patch("%s.TempestManager._transform_pattern" % PATH) - @mock.patch("%s.testr.TestrLauncher.list_tests" % PATH) - def test_list_tests(self, mock_testr_launcher_list_tests, - mock__transform_pattern): - tempest = manager.TempestManager(mock.MagicMock(uuid="uuuiiiddd")) - - self.assertEqual(mock_testr_launcher_list_tests.return_value, - tempest.list_tests()) - mock_testr_launcher_list_tests.assert_called_once_with("") - self.assertFalse(mock__transform_pattern.called) - mock_testr_launcher_list_tests.reset_mock() - - pattern = mock.Mock() - - self.assertEqual(mock_testr_launcher_list_tests.return_value, - tempest.list_tests(pattern)) - mock_testr_launcher_list_tests.assert_called_once_with( - mock__transform_pattern.return_value) - mock__transform_pattern.assert_called_once_with(pattern) - - @mock.patch("%s.testr.TestrLauncher.validate_args" % PATH) - def test_validate_args(self, mock_testr_launcher_validate_args): - tm = manager.TempestManager(mock.Mock()) - tm.validate_args({}) - 
tm.validate_args({"pattern": "some.test"}) - tm.validate_args({"pattern": "set=smoke"}) - tm.validate_args({"pattern": "set=compute"}) - tm.validate_args({"pattern": "set=full"}) - - e = self.assertRaises(exceptions.ValidationError, tm.validate_args, - {"pattern": "foo=bar"}) - self.assertEqual("Validation error: 'pattern' argument should be a " - "regexp or set name (format: 'tempest.api.identity." - "v3', 'set=smoke').", "%s" % e) - - e = self.assertRaises(exceptions.ValidationError, tm.validate_args, - {"pattern": "set=foo"}) - self.assertIn("Test set 'foo' not found in available Tempest test " - "sets. Available sets are ", "%s" % e) - - def test__transform_pattern(self): - tempest = manager.TempestManager(mock.MagicMock(uuid="uuuiiiddd")) - - self.assertEqual("foo", tempest._transform_pattern("foo")) - self.assertEqual("foo=bar", tempest._transform_pattern("foo=bar")) - self.assertEqual("", tempest._transform_pattern("set=full")) - self.assertEqual("smoke", tempest._transform_pattern("set=smoke")) - self.assertEqual("tempest.bar", tempest._transform_pattern("set=bar")) - self.assertEqual("tempest.api.compute", - tempest._transform_pattern("set=compute")) - - @mock.patch("%s.TempestManager._transform_pattern" % PATH) - def test_prepare_run_args(self, mock__transform_pattern): - tempest = manager.TempestManager(mock.MagicMock(uuid="uuuiiiddd")) - - self.assertEqual({}, tempest.prepare_run_args({})) - self.assertFalse(mock__transform_pattern.called) - - self.assertEqual({"foo": "bar"}, - tempest.prepare_run_args({"foo": "bar"})) - self.assertFalse(mock__transform_pattern.called) - - pattern = mock.Mock() - self.assertEqual({"pattern": mock__transform_pattern.return_value}, - tempest.prepare_run_args({"pattern": pattern})) - mock__transform_pattern.assert_called_once_with(pattern) diff --git a/tests/unit/plugins/openstack/wrappers/__init__.py b/tests/unit/plugins/openstack/wrappers/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/openstack/wrappers/test_cinder.py b/tests/unit/plugins/openstack/wrappers/test_cinder.py deleted file mode 100644 index 9edacc870a..0000000000 --- a/tests/unit/plugins/openstack/wrappers/test_cinder.py +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import ddt -import mock - -from rally import exceptions -from rally.plugins.openstack.wrappers import cinder as cinder_wrapper -from tests.unit import test - - -@ddt.ddt -class CinderWrapperTestCase(test.ScenarioTestCase): - - @ddt.data( - {"version": "1", "expected_class": cinder_wrapper.CinderV1Wrapper}, - {"version": "2", "expected_class": cinder_wrapper.CinderV2Wrapper} - ) - @ddt.unpack - def test_wrap(self, version, expected_class): - client = mock.MagicMock() - client.choose_version.return_value = version - self.assertIsInstance(cinder_wrapper.wrap(client, mock.Mock()), - expected_class) - - @mock.patch("rally.plugins.openstack.wrappers.cinder.LOG") - def test_wrap_wrong_version(self, mock_log): - client = mock.MagicMock() - client.choose_version.return_value = "dummy" - self.assertRaises(exceptions.InvalidArgumentsException, - cinder_wrapper.wrap, client, mock.Mock()) - self.assertTrue(mock_log.warning.mock_called) - - -class CinderV1WrapperTestCase(test.TestCase): - def setUp(self): - super(CinderV1WrapperTestCase, self).setUp() - self.client = mock.MagicMock() - self.client.choose_version.return_value = "1" - self.owner = mock.Mock() - self.wrapped_client = cinder_wrapper.wrap(self.client, self.owner) - - def test_create_volume(self): - self.wrapped_client.create_volume(1, display_name="fake_vol") - self.client.return_value.volumes.create.assert_called_once_with( - 1, display_name=self.owner.generate_random_name.return_value) - - def test_update_volume(self): - self.wrapped_client.update_volume("fake_id", display_name="fake_vol", - display_description="_updated") - self.client.return_value.volumes.update.assert_called_once_with( - "fake_id", - display_name=self.owner.generate_random_name.return_value, - display_description="_updated") - - def test_create_snapshot(self): - self.wrapped_client.create_snapshot("fake_id", - display_name="fake_snap") - (self.client.return_value.volume_snapshots.create. - assert_called_once_with( - "fake_id", - display_name=self.owner.generate_random_name.return_value)) - - -class CinderV2WrapperTestCase(test.TestCase): - def setUp(self): - super(CinderV2WrapperTestCase, self).setUp() - self.client = mock.MagicMock() - self.client.choose_version.return_value = "2" - self.owner = mock.Mock() - self.wrapped_client = cinder_wrapper.wrap(self.client, self.owner) - - def test_create_volume(self): - self.wrapped_client.create_volume(1, name="fake_vol") - self.client.return_value.volumes.create.assert_called_once_with( - 1, name=self.owner.generate_random_name.return_value) - - def test_create_snapshot(self): - self.wrapped_client.create_snapshot("fake_id", name="fake_snap") - (self.client.return_value.volume_snapshots.create. - assert_called_once_with( - "fake_id", - name=self.owner.generate_random_name.return_value)) - - def test_update_volume(self): - self.wrapped_client.update_volume("fake_id", name="fake_vol", - description="_updated") - self.client.return_value.volumes.update.assert_called_once_with( - "fake_id", name=self.owner.generate_random_name.return_value, - description="_updated") diff --git a/tests/unit/plugins/openstack/wrappers/test_glance.py b/tests/unit/plugins/openstack/wrappers/test_glance.py deleted file mode 100644 index 02f926f410..0000000000 --- a/tests/unit/plugins/openstack/wrappers/test_glance.py +++ /dev/null @@ -1,269 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import tempfile - -import ddt -from glanceclient import exc as glance_exc -import mock - -from rally.common import cfg -from rally import exceptions -from rally.plugins.openstack.wrappers import glance as glance_wrapper -from tests.unit import test - -CONF = cfg.CONF - - -@ddt.ddt -class GlanceWrapperTestCase(test.ScenarioTestCase): - - @ddt.data( - {"version": "1", "expected_class": glance_wrapper.GlanceV1Wrapper}, - {"version": "2", "expected_class": glance_wrapper.GlanceV2Wrapper} - ) - @ddt.unpack - def test_wrap(self, version, expected_class): - client = mock.MagicMock() - client.choose_version.return_value = version - self.assertIsInstance(glance_wrapper.wrap(client, mock.Mock()), - expected_class) - - @mock.patch("rally.plugins.openstack.wrappers.glance.LOG") - def test_wrap_wrong_version(self, mock_log): - client = mock.MagicMock() - client.choose_version.return_value = "dummy" - self.assertRaises(exceptions.InvalidArgumentsException, - glance_wrapper.wrap, client, mock.Mock()) - self.assertTrue(mock_log.warning.mock_called) - - -@ddt.ddt -class GlanceV1WrapperTestCase(test.ScenarioTestCase): - _tempfile = tempfile.NamedTemporaryFile() - - def setUp(self): - super(GlanceV1WrapperTestCase, self).setUp() - self.client = mock.MagicMock() - self.client.choose_version.return_value = "1" - self.owner = mock.Mock() - self.wrapped_client = glance_wrapper.wrap(self.client, self.owner) - - def test_get_image(self): - image = mock.Mock() - - return_image = self.wrapped_client.get_image(image) - - self.client.return_value.images.get.assert_called_once_with(image.id) - self.assertEqual(return_image, - self.client.return_value.images.get.return_value) - - def test_get_image_not_found(self): - image = mock.Mock() - self.client.return_value.images.get.side_effect = ( - glance_exc.HTTPNotFound) - - self.assertRaises(exceptions.GetResourceNotFound, - self.wrapped_client.get_image, image) - self.client.return_value.images.get.assert_called_once_with(image.id) - - @ddt.data( - {"location": "image_location", "visibility": "private"}, - {"location": "image_location", "fakearg": "fake"}, - {"location": "image_location", "name": "image_name"}, - {"location": _tempfile.name, "visibility": "public"}) - @ddt.unpack - @mock.patch("six.moves.builtins.open") - def test_create_image(self, mock_open, location, **kwargs): - return_image = self.wrapped_client.create_image("container_format", - location, - "disk_format", - **kwargs) - call_args = kwargs - call_args["container_format"] = "container_format" - call_args["disk_format"] = "disk_format" - if location.startswith("/"): - call_args["data"] = mock_open.return_value - mock_open.assert_called_once_with(location) - mock_open.return_value.close.assert_called_once_with() - else: - call_args["copy_from"] = location - if "name" not in kwargs: - call_args["name"] = self.owner.generate_random_name.return_value - if "visibility" in kwargs: - call_args["is_public"] = call_args.pop("visibility") == "public" - - self.client().images.create.assert_called_once_with(**call_args) - - self.mock_wait_for_status.mock.assert_called_once_with( - 
self.client().images.create.return_value, ["active"], - update_resource=self.wrapped_client.get_image, - check_interval=CONF.openstack.glance_image_create_poll_interval, - timeout=CONF.openstack.glance_image_create_timeout) - self.assertEqual(self.mock_wait_for_status.mock.return_value, - return_image) - - @ddt.data({"expected": True}, - {"visibility": "public", "expected": True}, - {"visibility": "private", "expected": False}) - @ddt.unpack - def test_set_visibility(self, visibility=None, expected=None): - image = mock.Mock() - if visibility is None: - self.wrapped_client.set_visibility(image) - else: - self.wrapped_client.set_visibility(image, visibility=visibility) - self.client().images.update.assert_called_once_with( - image.id, is_public=expected) - - @ddt.data({}, {"fakearg": "fake"}) - def test_list_images_basic(self, filters): - self.assertEqual(self.wrapped_client.list_images(**filters), - self.client().images.list.return_value) - self.client().images.list.assert_called_once_with(filters=filters) - - def test_list_images_with_owner(self): - self.assertEqual(self.wrapped_client.list_images(fakearg="fake", - owner="fakeowner"), - self.client().images.list.return_value) - self.client().images.list.assert_called_once_with( - owner="fakeowner", filters={"fakearg": "fake"}) - - def test_list_images_visibility_public(self): - public_images = [mock.Mock(is_public=True), mock.Mock(is_public=True)] - private_images = [mock.Mock(is_public=False), - mock.Mock(is_public=False)] - self.client().images.list.return_value = public_images + private_images - self.assertEqual(self.wrapped_client.list_images(fakearg="fake", - visibility="public"), - public_images) - self.client().images.list.assert_called_once_with( - filters={"fakearg": "fake"}) - - def test_list_images_visibility_private(self): - public_images = [mock.Mock(is_public=True), mock.Mock(is_public=True)] - private_images = [mock.Mock(is_public=False), - mock.Mock(is_public=False)] - self.client().images.list.return_value = public_images + private_images - self.assertEqual(self.wrapped_client.list_images(fakearg="fake", - visibility="private"), - private_images) - self.client().images.list.assert_called_once_with( - filters={"fakearg": "fake"}) - - -@ddt.ddt -class GlanceV2WrapperTestCase(test.ScenarioTestCase): - _tempfile = tempfile.NamedTemporaryFile() - - def setUp(self): - super(GlanceV2WrapperTestCase, self).setUp() - self.client = mock.MagicMock() - self.client.choose_version.return_value = "2" - self.owner = mock.Mock() - self.wrapped_client = glance_wrapper.wrap(self.client, self.owner) - - def test_get_image(self): - image = mock.Mock() - - return_image = self.wrapped_client.get_image(image) - - self.client.return_value.images.get.assert_called_once_with(image.id) - self.assertEqual(return_image, - self.client.return_value.images.get.return_value) - - def test_get_image_not_found(self): - image = mock.Mock() - self.client.return_value.images.get.side_effect = ( - glance_exc.HTTPNotFound) - - self.assertRaises(exceptions.GetResourceNotFound, - self.wrapped_client.get_image, image) - self.client.return_value.images.get.assert_called_once_with(image.id) - - @ddt.data( - {"location": "image_location", "visibility": "private"}, - {"location": "image_location", "fakearg": "fake"}, - {"location": "image_location", "name": "image_name"}, - {"location": _tempfile.name, "visibility": "public"}, - {"location": "image_location", - "expected_kwargs": {"visibility": "public"}, "is_public": True}) - @ddt.unpack - 
@mock.patch("six.moves.builtins.open") - @mock.patch("requests.get") - def test_create_image(self, mock_requests_get, mock_open, location, - expected_kwargs=None, **kwargs): - self.wrapped_client.get_image = mock.Mock() - created_image = mock.Mock() - uploaded_image = mock.Mock() - self.mock_wait_for_status.mock.side_effect = [created_image, - uploaded_image] - - return_image = self.wrapped_client.create_image("container_format", - location, - "disk_format", - **kwargs) - create_args = expected_kwargs or kwargs - create_args["container_format"] = "container_format" - create_args["disk_format"] = "disk_format" - create_args.setdefault("name", - self.owner.generate_random_name.return_value) - - self.client().images.create.assert_called_once_with(**create_args) - - if location.startswith("/"): - data = mock_open.return_value - mock_open.assert_called_once_with(location) - else: - data = mock_requests_get.return_value.raw - mock_requests_get.assert_called_once_with(location, stream=True) - data.close.assert_called_once_with() - self.client().images.upload.assert_called_once_with(created_image.id, - data) - - self.mock_wait_for_status.mock.assert_has_calls([ - mock.call( - self.client().images.create.return_value, ["queued"], - update_resource=self.wrapped_client.get_image, - check_interval=CONF.openstack. - glance_image_create_poll_interval, - timeout=CONF.openstack.glance_image_create_timeout), - mock.call( - created_image, ["active"], - update_resource=self.wrapped_client.get_image, - check_interval=CONF.openstack. - glance_image_create_poll_interval, - timeout=mock.ANY)]) - self.assertEqual(uploaded_image, return_image) - - @ddt.data({}, - {"visibility": "public"}, - {"visibility": "private"}) - @ddt.unpack - def test_set_visibility(self, visibility=None): - image = mock.Mock() - if visibility is None: - self.wrapped_client.set_visibility(image) - visibility = "public" - else: - self.wrapped_client.set_visibility(image, visibility=visibility) - self.client().images.update.assert_called_once_with( - image.id, visibility=visibility) - - @ddt.data({}, {"fakearg": "fake"}) - def test_list_images(self, filters): - self.assertEqual(self.wrapped_client.list_images(**filters), - self.client().images.list.return_value) - self.client().images.list.assert_called_once_with(filters=filters) diff --git a/tests/unit/plugins/openstack/wrappers/test_keystone.py b/tests/unit/plugins/openstack/wrappers/test_keystone.py deleted file mode 100644 index 4942b38f17..0000000000 --- a/tests/unit/plugins/openstack/wrappers/test_keystone.py +++ /dev/null @@ -1,242 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from keystoneclient import exceptions -import mock - -from rally.plugins.openstack.wrappers import keystone -from tests.unit import test - - -class KeystoneWrapperTestBase(object): - def test_list_services(self): - service = mock.MagicMock() - service.id = "fake_id" - service.name = "Foobar" - service.extra_field = "extra_field" - self.client.services.list.return_value = [service] - result = list(self.wrapped_client.list_services()) - self.assertEqual([("fake_id", "Foobar")], result) - self.assertEqual("fake_id", result[0].id) - self.assertEqual("Foobar", result[0].name) - self.assertFalse(hasattr(result[0], "extra_field")) - - def test_wrap(self): - client = mock.MagicMock() - client.version = "dummy" - self.assertRaises(NotImplementedError, keystone.wrap, client) - - def test_delete_service(self): - self.wrapped_client.delete_service("fake_id") - self.client.services.delete.assert_called_once_with("fake_id") - - def test_list_roles(self): - role = mock.MagicMock() - role.id = "fake_id" - role.name = "Foobar" - role.extra_field = "extra_field" - self.client.roles.list.return_value = [role] - result = list(self.wrapped_client.list_roles()) - self.assertEqual([("fake_id", "Foobar")], result) - self.assertEqual("fake_id", result[0].id) - self.assertEqual("Foobar", result[0].name) - self.assertFalse(hasattr(result[0], "extra_field")) - - def test_delete_role(self): - self.wrapped_client.delete_role("fake_id") - self.client.roles.delete.assert_called_once_with("fake_id") - - -class KeystoneV2WrapperTestCase(test.TestCase, KeystoneWrapperTestBase): - def setUp(self): - super(KeystoneV2WrapperTestCase, self).setUp() - self.client = mock.MagicMock() - self.client.version = "v2.0" - self.wrapped_client = keystone.wrap(self.client) - - def test_create_project(self): - self.wrapped_client.create_project("Foobar") - self.client.tenants.create.assert_called_once_with("Foobar") - - def test_create_project_in_non_default_domain_fail(self): - self.assertRaises( - NotImplementedError, self.wrapped_client.create_project, - "Foobar", "non-default-domain") - - def test_delete_project(self): - self.wrapped_client.delete_project("fake_id") - self.client.tenants.delete.assert_called_once_with("fake_id") - - def test_list_projects(self): - tenant = mock.MagicMock() - tenant.id = "fake_id" - tenant.name = "Foobar" - tenant.extra_field = "extra_field" - self.client.tenants.list.return_value = [tenant] - result = list(self.wrapped_client.list_projects()) - self.assertEqual([("fake_id", "Foobar", "default")], result) - self.assertEqual("fake_id", result[0].id) - self.assertEqual("Foobar", result[0].name) - self.assertEqual("default", result[0].domain_id) - self.assertFalse(hasattr(result[0], "extra_field")) - - def test_create_user(self): - self.wrapped_client.create_user("foo", "bar", email="foo@bar.com", - project_id="tenant_id", - domain_name="default") - self.client.users.create.assert_called_once_with( - "foo", "bar", "foo@bar.com", "tenant_id") - - def test_create_user_in_non_default_domain_fail(self): - self.assertRaises( - NotImplementedError, self.wrapped_client.create_user, - "foo", "bar", email="foo@bar.com", project_id="tenant_id", - domain_name="non-default-domain") - - def test_delete_user(self): - self.wrapped_client.delete_user("fake_id") - self.client.users.delete.assert_called_once_with("fake_id") - - def test_list_users(self): - user = mock.MagicMock() - user.id = "fake_id" - user.name = "foo" - user.tenantId = "tenant_id" - user.extra_field = "extra_field" - self.client.users.list.return_value 
= [user] - result = list(self.wrapped_client.list_users()) - self.assertEqual([("fake_id", "foo", "tenant_id", "default")], result) - self.assertEqual("fake_id", result[0].id) - self.assertEqual("foo", result[0].name) - self.assertEqual("tenant_id", result[0].project_id) - self.assertEqual("default", result[0].domain_id) - self.assertFalse(hasattr(result[0], "extra_field")) - - def test_create_role(self): - self.wrapped_client.create_role("foo_name") - self.client.roles.create.assert_called_once_with("foo_name") - - def test_add_role(self): - self.wrapped_client.add_role("fake_role_id", "fake_user_id", - "fake_project_id") - self.client.roles.add_user_role.assert_called_once_with( - "fake_user_id", "fake_role_id", tenant="fake_project_id") - - def test_remove_role(self): - self.wrapped_client.remove_role("fake_role_id", "fake_user_id", - "fake_project_id") - self.client.roles.remove_user_role.assert_called_once_with( - "fake_user_id", "fake_role_id", tenant="fake_project_id") - - -class KeystoneV3WrapperTestCase(test.TestCase, KeystoneWrapperTestBase): - def setUp(self): - super(KeystoneV3WrapperTestCase, self).setUp() - self.client = mock.MagicMock() - self.client.version = "v3" - self.wrapped_client = keystone.wrap(self.client) - self.client.domains.get.side_effect = exceptions.NotFound - self.client.domains.list.return_value = [ - mock.MagicMock(id="domain_id")] - - def test_create_project(self): - self.wrapped_client.create_project("Foobar", "domain") - self.client.projects.create.assert_called_once_with( - name="Foobar", domain="domain_id") - - def test_create_project_with_non_existing_domain_fail(self): - self.client.domains.list.return_value = [] - self.assertRaises(exceptions.NotFound, - self.wrapped_client.create_project, - "Foobar", "non-existing-domain") - - def test_delete_project(self): - self.wrapped_client.delete_project("fake_id") - self.client.projects.delete.assert_called_once_with("fake_id") - - def test_list_projects(self): - project = mock.MagicMock() - project.id = "fake_id" - project.name = "Foobar" - project.domain_id = "domain_id" - project.extra_field = "extra_field" - self.client.projects.list.return_value = [project] - result = list(self.wrapped_client.list_projects()) - self.assertEqual([("fake_id", "Foobar", "domain_id")], result) - self.assertEqual("fake_id", result[0].id) - self.assertEqual("Foobar", result[0].name) - self.assertEqual("domain_id", result[0].domain_id) - self.assertFalse(hasattr(result[0], "extra_field")) - - def test_create_user(self): - fake_role = mock.MagicMock(id="fake_role_id") - fake_role.name = "__member__" - self.client.roles.list.return_value = [fake_role] - self.client.users.create.return_value = mock.MagicMock( - id="fake_user_id") - - self.wrapped_client.create_user( - "foo", "bar", email="foo@bar.com", - project_id="project_id", domain_name="domain") - self.client.users.create.assert_called_once_with( - name="foo", password="bar", - email="foo@bar.com", default_project="project_id", - domain="domain_id") - - def test_create_user_with_non_existing_domain_fail(self): - self.client.domains.list.return_value = [] - self.assertRaises(exceptions.NotFound, - self.wrapped_client.create_user, "foo", "bar", - email="foo@bar.com", project_id="project_id", - domain_name="non-existing-domain") - - def test_delete_user(self): - self.wrapped_client.delete_user("fake_id") - self.client.users.delete.assert_called_once_with("fake_id") - - def test_list_users(self): - user = mock.MagicMock() - user.id = "fake_id" - user.name = "foo" - 
user.default_project_id = "project_id" - user.domain_id = "domain_id" - user.extra_field = "extra_field" - self.client.users.list.return_value = [user] - result = list(self.wrapped_client.list_users()) - self.assertEqual([("fake_id", "foo", "project_id", "domain_id")], - result) - self.assertEqual("fake_id", result[0].id) - self.assertEqual("foo", result[0].name) - self.assertEqual("project_id", result[0].project_id) - self.assertEqual("domain_id", result[0].domain_id) - self.assertFalse(hasattr(result[0], "extra_field")) - - def test_create_role(self, **kwargs): - self.wrapped_client.create_role("foo_name", domain="domain", - **kwargs) - self.client.roles.create.assert_called_once_with( - "foo_name", domain="domain", **kwargs) - - def test_add_role(self): - self.wrapped_client.add_role("fake_role_id", "fake_user_id", - "fake_project_id") - self.client.roles.grant.assert_called_once_with( - "fake_role_id", user="fake_user_id", project="fake_project_id") - - def test_remove_role(self): - self.wrapped_client.remove_role("fake_role_id", "fake_user_id", - "fake_project_id") - self.client.roles.revoke.assert_called_once_with( - "fake_role_id", user="fake_user_id", project="fake_project_id") diff --git a/tests/unit/plugins/openstack/wrappers/test_network.py b/tests/unit/plugins/openstack/wrappers/test_network.py deleted file mode 100644 index f04db907ec..0000000000 --- a/tests/unit/plugins/openstack/wrappers/test_network.py +++ /dev/null @@ -1,501 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ddt -import mock - -from rally.common import utils -from rally import consts -from rally.plugins.openstack.wrappers import network -from tests.unit import test - -from neutronclient.common import exceptions as neutron_exceptions - -SVC = "rally.plugins.openstack.wrappers.network." 
- - -class Owner(utils.RandomNameGeneratorMixin): - task = {"uuid": "task-uuid"} - - -@ddt.ddt -class NeutronWrapperTestCase(test.TestCase): - def setUp(self): - self.owner = Owner() - self.owner.generate_random_name = mock.Mock() - super(NeutronWrapperTestCase, self).setUp() - - def get_wrapper(self, *skip_cidrs, **kwargs): - return network.NeutronWrapper(mock.Mock(), self.owner, config=kwargs) - - def test_SUBNET_IP_VERSION(self): - self.assertEqual(4, network.NeutronWrapper.SUBNET_IP_VERSION) - - @mock.patch("rally.plugins.openstack.wrappers.network.generate_cidr") - def test__generate_cidr(self, mock_generate_cidr): - cidrs = iter(range(5)) - mock_generate_cidr.side_effect = ( - lambda start_cidr: start_cidr + next(cidrs) - ) - service = self.get_wrapper(start_cidr=3) - self.assertEqual(3, service._generate_cidr()) - self.assertEqual(4, service._generate_cidr()) - self.assertEqual(5, service._generate_cidr()) - self.assertEqual(6, service._generate_cidr()) - self.assertEqual(7, service._generate_cidr()) - self.assertEqual([mock.call(start_cidr=3)] * 5, - mock_generate_cidr.mock_calls) - - def test_external_networks(self): - wrap = self.get_wrapper() - wrap.client.list_networks.return_value = {"networks": "foo_networks"} - self.assertEqual("foo_networks", wrap.external_networks) - wrap.client.list_networks.assert_called_once_with( - **{"router:external": True}) - - def test_get_network(self): - wrap = self.get_wrapper() - neutron_net = {"id": "foo_id", - "name": self.owner.generate_random_name.return_value, - "tenant_id": "foo_tenant", - "status": "foo_status", - "router:external": "foo_external", - "subnets": "foo_subnets"} - expected_net = {"id": "foo_id", - "name": self.owner.generate_random_name.return_value, - "tenant_id": "foo_tenant", - "status": "foo_status", - "external": "foo_external", - "router_id": None, - "subnets": "foo_subnets"} - wrap.client.show_network.return_value = {"network": neutron_net} - net = wrap.get_network(net_id="foo_id") - self.assertEqual(expected_net, net) - wrap.client.show_network.assert_called_once_with("foo_id") - - wrap.client.show_network.side_effect = ( - neutron_exceptions.NeutronClientException) - self.assertRaises(network.NetworkWrapperException, wrap.get_network, - net_id="foo_id") - - wrap.client.list_networks.return_value = {"networks": [neutron_net]} - net = wrap.get_network(name="foo_name") - self.assertEqual(expected_net, net) - wrap.client.list_networks.assert_called_once_with(name="foo_name") - - wrap.client.list_networks.return_value = {"networks": []} - self.assertRaises(network.NetworkWrapperException, wrap.get_network, - name="foo_name") - - def test_create_v1_pool(self): - subnet = "subnet_id" - tenant = "foo_tenant" - service = self.get_wrapper() - expected_pool = {"pool": { - "id": "pool_id", - "name": self.owner.generate_random_name.return_value, - "subnet_id": subnet, - "tenant_id": tenant}} - service.client.create_pool.return_value = expected_pool - resultant_pool = service.create_v1_pool(tenant, subnet) - service.client.create_pool.assert_called_once_with({ - "pool": {"lb_method": "ROUND_ROBIN", - "subnet_id": subnet, - "tenant_id": tenant, - "protocol": "HTTP", - "name": self.owner.generate_random_name.return_value}}) - self.assertEqual(expected_pool, resultant_pool) - - def test_create_network(self): - service = self.get_wrapper() - service.client.create_network.return_value = { - "network": {"id": "foo_id", - "name": self.owner.generate_random_name.return_value, - "status": "foo_status"}} - net = 
service.create_network("foo_tenant") - service.client.create_network.assert_called_once_with({ - "network": {"tenant_id": "foo_tenant", - "name": self.owner.generate_random_name.return_value}}) - self.assertEqual({"id": "foo_id", - "name": self.owner.generate_random_name.return_value, - "status": "foo_status", - "external": False, - "tenant_id": "foo_tenant", - "router_id": None, - "subnets": []}, net) - - def test_create_network_with_subnets(self): - subnets_num = 4 - service = self.get_wrapper() - subnets_cidrs = iter(range(subnets_num)) - subnets_ids = iter(range(subnets_num)) - service._generate_cidr = mock.Mock( - side_effect=lambda v: "cidr-%d" % next(subnets_cidrs)) - service.client.create_subnet = mock.Mock( - side_effect=lambda i: { - "subnet": {"id": "subnet-%d" % next(subnets_ids)}}) - service.client.create_network.return_value = { - "network": {"id": "foo_id", - "name": self.owner.generate_random_name.return_value, - "status": "foo_status"}} - - net = service.create_network("foo_tenant", subnets_num=subnets_num) - - service.client.create_network.assert_called_once_with({ - "network": {"tenant_id": "foo_tenant", - "name": self.owner.generate_random_name.return_value}}) - self.assertEqual({"id": "foo_id", - "name": self.owner.generate_random_name.return_value, - "status": "foo_status", - "external": False, - "router_id": None, - "tenant_id": "foo_tenant", - "subnets": ["subnet-%d" % i - for i in range(subnets_num)]}, net) - self.assertEqual( - service.client.create_subnet.mock_calls, - [mock.call({"subnet": - {"name": self.owner.generate_random_name.return_value, - "enable_dhcp": True, - "network_id": "foo_id", - "tenant_id": "foo_tenant", - "ip_version": service.SUBNET_IP_VERSION, - "dns_nameservers": ["8.8.8.8", "8.8.4.4"], - "cidr": "cidr-%d" % i}}) - for i in range(subnets_num)]) - - def test_create_network_with_router(self): - service = self.get_wrapper() - service.create_router = mock.Mock(return_value={"id": "foo_router"}) - service.client.create_network.return_value = { - "network": {"id": "foo_id", - "name": self.owner.generate_random_name.return_value, - "status": "foo_status"}} - net = service.create_network("foo_tenant", add_router=True) - self.assertEqual({"id": "foo_id", - "name": self.owner.generate_random_name.return_value, - "status": "foo_status", - "external": False, - "tenant_id": "foo_tenant", - "router_id": "foo_router", - "subnets": []}, net) - service.create_router.assert_called_once_with(external=True, - tenant_id="foo_tenant") - - def test_create_network_with_router_and_subnets(self): - subnets_num = 4 - service = self.get_wrapper() - service._generate_cidr = mock.Mock(return_value="foo_cidr") - service.create_router = mock.Mock(return_value={"id": "foo_router"}) - service.client.create_subnet = mock.Mock( - return_value={"subnet": {"id": "foo_subnet"}}) - service.client.create_network.return_value = { - "network": {"id": "foo_id", - "name": self.owner.generate_random_name.return_value, - "status": "foo_status"}} - net = service.create_network("foo_tenant", add_router=True, - subnets_num=subnets_num, - dns_nameservers=["foo_nameservers"]) - self.assertEqual({"id": "foo_id", - "name": self.owner.generate_random_name.return_value, - "status": "foo_status", - "external": False, - "tenant_id": "foo_tenant", - "router_id": "foo_router", - "subnets": ["foo_subnet"] * subnets_num}, net) - service.create_router.assert_called_once_with(external=True, - tenant_id="foo_tenant") - self.assertEqual( - service.client.create_subnet.mock_calls, - [mock.call({"subnet": - 
{"name": self.owner.generate_random_name.return_value, - "enable_dhcp": True, - "network_id": "foo_id", - "tenant_id": "foo_tenant", - "ip_version": service.SUBNET_IP_VERSION, - "dns_nameservers": ["foo_nameservers"], - "cidr": "foo_cidr"}})] * subnets_num) - self.assertEqual(service.client.add_interface_router.mock_calls, - [mock.call("foo_router", {"subnet_id": "foo_subnet"}) - for i in range(subnets_num)]) - - @mock.patch("rally.plugins.openstack.wrappers.network.NeutronWrapper" - ".supports_extension", return_value=(False, "")) - def test_delete_network(self, mock_neutron_wrapper_supports_extension): - service = self.get_wrapper() - service.client.list_ports.return_value = {"ports": []} - service.client.list_subnets.return_value = {"subnets": []} - service.client.delete_network.return_value = "foo_deleted" - result = service.delete_network({"id": "foo_id", "router_id": None, - "subnets": []}) - self.assertEqual("foo_deleted", result) - self.assertEqual([], service.client.remove_gateway_router.mock_calls) - self.assertEqual( - [], service.client.remove_interface_router.mock_calls) - self.assertEqual([], service.client.delete_router.mock_calls) - self.assertEqual([], service.client.delete_subnet.mock_calls) - service.client.delete_network.assert_called_once_with("foo_id") - - def test_delete_v1_pool(self): - service = self.get_wrapper() - pool = {"pool": {"id": "pool-id"}} - service.delete_v1_pool(pool["pool"]["id"]) - service.client.delete_pool.assert_called_once_with("pool-id") - - @mock.patch("rally.plugins.openstack.wrappers.network.NeutronWrapper" - ".supports_extension", return_value=(True, "")) - def test_delete_network_with_dhcp_and_router_and_ports_and_subnets( - self, mock_neutron_wrapper_supports_extension): - - service = self.get_wrapper() - agents = ["foo_agent", "bar_agent"] - subnets = ["foo_subnet", "bar_subnet"] - ports = [{"id": "foo_port", "device_owner": "network:router_interface", - "device_id": "rounttter"}, - {"id": "bar_port", "device_owner": "network:dhcp"}] - service.client.list_dhcp_agent_hosting_networks.return_value = ( - {"agents": [{"id": agent_id} for agent_id in agents]}) - service.client.list_ports.return_value = ({"ports": ports}) - service.client.list_subnets.return_value = ( - {"subnets": [{"id": id_} for id_ in subnets]}) - service.client.delete_network.return_value = "foo_deleted" - - result = service.delete_network( - {"id": "foo_id", "router_id": "foo_router", "subnets": subnets, - "lb_pools": []}) - - self.assertEqual("foo_deleted", result) - self.assertEqual( - service.client.remove_network_from_dhcp_agent.mock_calls, - [mock.call(agent_id, "foo_id") for agent_id in agents]) - self.assertEqual(service.client.remove_gateway_router.mock_calls, - [mock.call("foo_router")]) - service.client.delete_port.assert_called_once_with(ports[1]["id"]) - service.client.remove_interface_router.assert_called_once_with( - ports[0]["device_id"], {"port_id": ports[0]["id"]}) - self.assertEqual(service.client.delete_subnet.mock_calls, - [mock.call(subnet_id) for subnet_id in subnets]) - service.client.delete_network.assert_called_once_with("foo_id") - - mock_neutron_wrapper_supports_extension.assert_called_once_with( - "dhcp_agent_scheduler") - - @ddt.data({"exception_type": neutron_exceptions.NotFound, - "should_raise": False}, - {"exception_type": neutron_exceptions.BadRequest, - "should_raise": False}, - {"exception_type": KeyError, - "should_raise": True}) - @ddt.unpack - @mock.patch("rally.plugins.openstack.wrappers.network.NeutronWrapper" - 
".supports_extension", return_value=(True, "")) - def test_delete_network_with_router_throw_exception( - self, mock_neutron_wrapper_supports_extension, exception_type, - should_raise): - # Ensure cleanup context still move forward even - # remove_interface_router throw NotFound/BadRequest exception - - service = self.get_wrapper() - service.client.remove_interface_router.side_effect = exception_type - agents = ["foo_agent", "bar_agent"] - subnets = ["foo_subnet", "bar_subnet"] - ports = [{"id": "foo_port", "device_owner": "network:router_interface", - "device_id": "rounttter"}, - {"id": "bar_port", "device_owner": "network:dhcp"}] - service.client.list_dhcp_agent_hosting_networks.return_value = ( - {"agents": [{"id": agent_id} for agent_id in agents]}) - service.client.list_ports.return_value = ({"ports": ports}) - service.client.delete_network.return_value = "foo_deleted" - service.client.list_subnets.return_value = {"subnets": [ - {"id": id_} for id_ in subnets]} - - if should_raise: - self.assertRaises(exception_type, service.delete_network, - {"id": "foo_id", "router_id": "foo_router", - "subnets": subnets, "lb_pools": []}) - - self.assertNotEqual(service.client.delete_subnet.mock_calls, - [mock.call(subnet_id) for subnet_id in - subnets]) - self.assertFalse(service.client.delete_network.called) - else: - result = service.delete_network( - {"id": "foo_id", "router_id": "foo_router", "subnets": subnets, - "lb_pools": []}) - - self.assertEqual("foo_deleted", result) - service.client.delete_port.assert_called_once_with(ports[1]["id"]) - service.client.remove_interface_router.assert_called_once_with( - ports[0]["device_id"], {"port_id": ports[0]["id"]}) - self.assertEqual(service.client.delete_subnet.mock_calls, - [mock.call(subnet_id) for subnet_id in subnets]) - service.client.delete_network.assert_called_once_with("foo_id") - - self.assertEqual( - service.client.remove_network_from_dhcp_agent.mock_calls, - [mock.call(agent_id, "foo_id") for agent_id in agents]) - self.assertEqual(service.client.remove_gateway_router.mock_calls, - [mock.call("foo_router")]) - mock_neutron_wrapper_supports_extension.assert_called_once_with( - "dhcp_agent_scheduler") - - def test_list_networks(self): - service = self.get_wrapper() - service.client.list_networks.return_value = {"networks": "foo_nets"} - self.assertEqual("foo_nets", service.list_networks()) - service.client.list_networks.assert_called_once_with() - - @mock.patch(SVC + "NeutronWrapper.external_networks") - def test_create_floating_ip(self, mock_neutron_wrapper_external_networks): - wrap = self.get_wrapper() - wrap.create_port = mock.Mock(return_value={"id": "port_id"}) - wrap.client.create_floatingip = mock.Mock( - return_value={"floatingip": {"id": "fip_id", - "floating_ip_address": "fip_ip"}}) - - self.assertRaises(ValueError, wrap.create_floating_ip) - - mock_neutron_wrapper_external_networks.__get__ = lambda *args: [] - self.assertRaises(network.NetworkWrapperException, - wrap.create_floating_ip, tenant_id="foo_tenant") - - mock_neutron_wrapper_external_networks.__get__ = ( - lambda *args: [{"id": "ext_id"}] - ) - fip = wrap.create_floating_ip(tenant_id="foo_tenant", - port_id="port_id") - self.assertEqual({"id": "fip_id", "ip": "fip_ip"}, fip) - - wrap.get_network = mock.Mock( - return_value={"id": "foo_net", "external": True}) - wrap.create_floating_ip(tenant_id="foo_tenant", ext_network="ext_net", - port_id="port_id") - - wrap.get_network = mock.Mock( - return_value={"id": "foo_net", "external": False}) - 
wrap.create_floating_ip(tenant_id="foo_tenant", port_id="port_id") - - self.assertRaises(network.NetworkWrapperException, - wrap.create_floating_ip, tenant_id="foo_tenant", - ext_network="ext_net") - - def test_delete_floating_ip(self): - wrap = self.get_wrapper() - wrap.delete_floating_ip("fip_id") - wrap.delete_floating_ip("fip_id", ignored_kwarg="bar") - self.assertEqual([mock.call("fip_id")] * 2, - wrap.client.delete_floatingip.mock_calls) - - @mock.patch(SVC + "NeutronWrapper.external_networks") - def test_create_router(self, mock_neutron_wrapper_external_networks): - wrap = self.get_wrapper() - wrap.client.create_router.return_value = {"router": "foo_router"} - wrap.client.list_extensions.return_value = { - "extensions": [{"alias": "ext-gw-mode"}]} - mock_neutron_wrapper_external_networks.__get__ = ( - lambda *args: [{"id": "ext_id"}] - ) - - router = wrap.create_router() - wrap.client.create_router.assert_called_once_with( - {"router": {"name": self.owner.generate_random_name.return_value}}) - self.assertEqual("foo_router", router) - - router = wrap.create_router(external=True, foo="bar") - wrap.client.create_router.assert_called_with( - {"router": {"name": self.owner.generate_random_name.return_value, - "external_gateway_info": { - "network_id": "ext_id", - "enable_snat": True}, - "foo": "bar"}}) - - @mock.patch(SVC + "NeutronWrapper.external_networks") - def test_create_router_without_ext_gw_mode_extension( - self, mock_neutron_wrapper_external_networks): - wrap = self.get_wrapper() - wrap.client.create_router.return_value = {"router": "foo_router"} - wrap.client.list_extensions.return_value = {"extensions": []} - mock_neutron_wrapper_external_networks.__get__ = ( - lambda *args: [{"id": "ext_id"}] - ) - - router = wrap.create_router() - wrap.client.create_router.assert_called_once_with( - {"router": {"name": self.owner.generate_random_name.return_value}}) - self.assertEqual(router, "foo_router") - - router = wrap.create_router(external=True, foo="bar") - wrap.client.create_router.assert_called_with( - {"router": {"name": self.owner.generate_random_name.return_value, - "external_gateway_info": {"network_id": "ext_id"}, - "foo": "bar"}}) - - def test_create_port(self): - wrap = self.get_wrapper() - wrap.client.create_port.return_value = {"port": "foo_port"} - - port = wrap.create_port("foo_net") - wrap.client.create_port.assert_called_once_with( - {"port": {"network_id": "foo_net", - "name": self.owner.generate_random_name.return_value}}) - self.assertEqual("foo_port", port) - - port = wrap.create_port("foo_net", foo="bar") - wrap.client.create_port.assert_called_with( - {"port": {"network_id": "foo_net", - "name": self.owner.generate_random_name.return_value, - "foo": "bar"}}) - - def test_supports_extension(self): - wrap = self.get_wrapper() - wrap.client.list_extensions.return_value = ( - {"extensions": [{"alias": "extension"}]}) - self.assertTrue(wrap.supports_extension("extension")[0]) - - wrap.client.list_extensions.return_value = ( - {"extensions": [{"alias": "extension"}]}) - self.assertFalse(wrap.supports_extension("dummy-group")[0]) - - wrap.client.list_extensions.return_value = {} - self.assertFalse(wrap.supports_extension("extension")[0]) - - -class FunctionsTestCase(test.TestCase): - - def test_generate_cidr(self): - with mock.patch("rally.plugins.openstack.wrappers.network.cidr_incr", - iter(range(1, 4))): - self.assertEqual("10.2.1.0/24", network.generate_cidr()) - self.assertEqual("10.2.2.0/24", network.generate_cidr()) - self.assertEqual("10.2.3.0/24", 
network.generate_cidr()) - - with mock.patch("rally.plugins.openstack.wrappers.network.cidr_incr", - iter(range(1, 4))): - start_cidr = "1.1.0.0/26" - self.assertEqual("1.1.0.64/26", network.generate_cidr(start_cidr)) - self.assertEqual("1.1.0.128/26", network.generate_cidr(start_cidr)) - self.assertEqual("1.1.0.192/26", network.generate_cidr(start_cidr)) - - def test_wrap(self): - mock_clients = mock.Mock() - mock_clients.nova().networks.list.return_value = [] - config = {"fakearg": "fake"} - owner = Owner() - - mock_clients.services.return_value = {"foo": consts.Service.NEUTRON} - wrapper = network.wrap(mock_clients, owner, config) - self.assertIsInstance(wrapper, network.NeutronWrapper) - self.assertEqual(wrapper.owner, owner) - self.assertEqual(wrapper.config, config) diff --git a/tests/unit/plugins/workload/__init__.py b/tests/unit/plugins/workload/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/plugins/workload/test_siege.py b/tests/unit/plugins/workload/test_siege.py deleted file mode 100644 index f5913fb0d4..0000000000 --- a/tests/unit/plugins/workload/test_siege.py +++ /dev/null @@ -1,82 +0,0 @@ -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import sys - -from rally.plugins.workload import siege -from tests.unit import test - -import mock - -SIEGE_OUTPUT = """ -Transactions: 522 hits -Availability: 100.00 % -Elapsed time: 3.69 secs -Data transferred: 1.06 MB -Response time: 0.10 secs -Transaction rate: 141.46 trans/sec -Throughput: 0.29 MB/sec -Concurrency: 14.71 -Successful transactions: 522 -Failed transactions: 0 -Longest transaction: 0.26 -Shortest transaction: 0.08 -""" - -OUTPUT = [ - {"output_value": "curl", "descr": "", "output_key": "curl_cli"}, - {"output_value": "wp-net", "descr": "", "output_key": "net_name"}, - {"output_value": ["10.0.0.3", "172.16.0.159"], - "description": "", - "output_key": "gate_node"}, - {"output_value": { - "1": {"wordpress-network": ["10.0.0.4"]}, - "0": {"wordpress-network": ["10.0.0.5"]}}, - "description": "No description given", "output_key": "wp_nodes"}] - - -class SiegeTestCase(test.TestCase): - - @mock.patch("rally.plugins.workload.siege.json.load") - def test_get_instances(self, mock_load): - mock_load.return_value = OUTPUT - instances = list(siege.get_instances()) - self.assertEqual(["10.0.0.4", "10.0.0.5"], instances) - - @mock.patch("rally.plugins.workload.siege.get_instances") - @mock.patch("rally.plugins.workload.siege.generate_urls_list") - @mock.patch("rally.plugins.workload.siege.subprocess.check_output") - def test_run(self, mock_check_output, mock_generate_urls_list, - mock_get_instances): - mock_get_instances.return_value = [1, 2] - mock_generate_urls_list.return_value = "urls" - mock_check_output.return_value = SIEGE_OUTPUT - mock_write = mock.MagicMock() - mock_stdout = mock.MagicMock(write=mock_write) - real_stdout = sys.stdout - sys.stdout = mock_stdout - siege.run() - expected = [mock.call("Transaction rate:141.46\n"), - mock.call("Throughput:0.29\n")] 
- sys.stdout = real_stdout - self.assertEqual(expected, mock_write.mock_calls) - - @mock.patch("rally.plugins.workload.siege.tempfile.NamedTemporaryFile") - def test_generate_urls_list(self, mock_named_temporary_file): - mock_urls = mock.MagicMock() - mock_named_temporary_file.return_value = mock_urls - name = siege.generate_urls_list(["foo", "bar"]) - self.assertEqual(mock_urls.name, name) diff --git a/tests/unit/rally_jobs/__init__.py b/tests/unit/rally_jobs/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/unit/rally_jobs/test_jobs.py b/tests/unit/rally_jobs/test_jobs.py deleted file mode 100644 index 3fa8a04262..0000000000 --- a/tests/unit/rally_jobs/test_jobs.py +++ /dev/null @@ -1,88 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import shutil -import tempfile -import traceback - -import mock - -import rally -from rally import api -from rally.common.plugin import discover -from rally.common import yamlutils as yaml -from rally.task import engine -from tests.unit import fakes -from tests.unit import test - - -class RallyJobsTestCase(test.TestCase): - rally_jobs_path = os.path.join( - os.path.dirname(rally.__file__), "..", "rally-jobs") - - def setUp(self): - super(RallyJobsTestCase, self).setUp() - self.tmp_dir = tempfile.mkdtemp() - os.makedirs(os.path.join(self.tmp_dir, ".rally")) - shutil.copytree(os.path.join(self.rally_jobs_path, "extra"), - os.path.join(self.tmp_dir, ".rally", "extra")) - - self.original_home = os.environ["HOME"] - os.environ["HOME"] = self.tmp_dir - - def return_home(): - os.environ["HOME"] = self.original_home - self.addCleanup(shutil.rmtree, self.tmp_dir) - - self.addCleanup(return_home) - - def test_schema_is_valid(self): - discover.load_plugins(os.path.join(self.rally_jobs_path, "plugins")) - - files = {f for f in os.listdir(self.rally_jobs_path) - if (os.path.isfile(os.path.join(self.rally_jobs_path, f)) and - f.endswith(".yaml") and not f.endswith("_args.yaml"))} - - # TODO(andreykurilin): figure out why it fails - files -= {"rally-mos.yaml", "sahara-clusters.yaml"} - - for filename in files: - full_path = os.path.join(self.rally_jobs_path, filename) - - with open(full_path) as task_file: - try: - args_file = os.path.join( - self.rally_jobs_path, - filename.rsplit(".", 1)[0] + "_args.yaml") - - args = {} - if os.path.exists(args_file): - args = yaml.safe_load(open(args_file).read()) - if not isinstance(args, dict): - raise TypeError( - "args file %s must be dict in yaml or json " - "presentation" % args_file) - - task_inst = api._Task(api.API(skip_db_check=True)) - task = task_inst.render_template( - task_template=task_file.read(), **args) - task = engine.TaskConfig(yaml.safe_load(task)) - task_obj = fakes.FakeTask({"uuid": full_path}) - - eng = engine.TaskEngine(task, task_obj, mock.Mock()) - eng.validate(only_syntax=True) - except Exception: - print(traceback.format_exc()) - self.fail("Wrong task input file: %s" % full_path) diff --git a/tests/unit/task/test_engine.py 
b/tests/unit/task/test_engine.py index 2039d46318..ae53d2bb19 100644 --- a/tests/unit/task/test_engine.py +++ b/tests/unit/task/test_engine.py @@ -331,8 +331,7 @@ class TaskEngineTestCase(test.TestCase): pass mock_task_instance = mock.MagicMock() - wconf1 = self._make_workload(name="SomeScen.scenario", - contexts={"users": {}}) + wconf1 = self._make_workload(name="SomeScen.scenario") wconf2 = self._make_workload(name="SomeScen.scenario", position=1) subtask1 = {"workloads": [wconf1, wconf2]} diff --git a/tests/unit/task/test_types.py b/tests/unit/task/test_types.py index 305542abfd..994e801dd6 100644 --- a/tests/unit/task/test_types.py +++ b/tests/unit/task/test_types.py @@ -139,103 +139,3 @@ class PreprocessTestCase(test.TestCase): mock_scenario_get.return_value._meta_get.assert_called_once_with( "preprocessors", default={}) self.assertEqual({"a": 20, "b": 20}, result) - - -class DeprecatedBehaviourMixinTestCase(test.TestCase): - def test_transform(self): - call_args_list = [] - expected_return_value = mock.Mock() - - @types.plugin.configure(self.id()) - class OldResource(types.ResourceType, - types.DeprecatedBehaviourMixin): - def pre_process(s, resource_spec, config): - call_args_list.append((resource_spec, config)) - return expected_return_value - - clients = mock.Mock() - resource_config = {"foo": "bar"} - - self.assertEqual(expected_return_value, - OldResource.transform(clients, resource_config)) - self.assertEqual(expected_return_value, - OldResource.transform(None, resource_config)) - self.assertEqual([(resource_config, {}), - (resource_config, {})], call_args_list) - - -class ResourceTypeCompatTestCase(test.TestCase): - """Check how compatibility with an old interface works.""" - - def test_applying_preprocess_method(self): - - setattr(types._pre_process_method, "key", self.id()) - - @plugin.configure("1-%s" % self.id()) - class OldResourceType(types.ResourceType): - @classmethod - def transform(cls, clients, resource_config): - pass - - self.assertEqual( - self.id(), - getattr(OldResourceType({}, {}).pre_process, "key", None)) - - @plugin.configure("2-%s" % self.id()) - class ResourceTypeWithInnerCompatLayer(types.ResourceType): - @classmethod - def transform(cls, clients, resource_config): - pass - - def pre_process(self, resource_spec, config): - pass - - self.assertNotEqual( - self.id(), - getattr(ResourceTypeWithInnerCompatLayer.pre_process, "key", None)) - - @plugin.configure("3-%s" % self.id()) - class CurrentResourceType(types.ResourceType): - def pre_process(self, resource_spec, config): - pass - - self.assertFalse(hasattr(CurrentResourceType, "transform")) - self.assertNotEqual( - self.id(), - getattr(CurrentResourceType.pre_process, "key", None)) - - def test__pre_process_method(self): - cred1 = mock.Mock() - cred2 = mock.Mock() - - self_obj = mock.Mock() - self_obj.__class__ = mock.Mock() - self_obj._context = {"admin": {"credential": cred1}, - "users": [{"credential": cred2}]} - - # case #1: in case of None resource_spec, None should be returned - self.assertIsNone(types._pre_process_method(self_obj, None, None)) - self.assertFalse(self_obj.__class__.transform.called) - - # case #2: admin creds should be used - resource_spec = {"foo": "bar"} - res = types._pre_process_method(self_obj, resource_spec, None) - self.assertEqual(self_obj.__class__.transform.return_value, res) - self.assertTrue(self_obj.__class__.transform.called) - c_args, c_kwargs = self_obj.__class__.transform.call_args_list[0] - self.assertFalse(c_args) - self.assertEqual({"resource_config", "clients"}, 
set(c_kwargs.keys())) - self.assertEqual(resource_spec, c_kwargs["resource_config"]) - self.assertEqual(cred1, c_kwargs["clients"].credential) - - # case #3: user creds should be used - self_obj.__class__.transform.reset_mock() - self_obj._context.pop("admin", None) - res = types._pre_process_method(self_obj, resource_spec, None) - self.assertEqual(self_obj.__class__.transform.return_value, res) - self.assertTrue(self_obj.__class__.transform.called) - c_args, c_kwargs = self_obj.__class__.transform.call_args_list[0] - self.assertFalse(c_args) - self.assertEqual({"resource_config", "clients"}, set(c_kwargs.keys())) - self.assertEqual(resource_spec, c_kwargs["resource_config"]) - self.assertEqual(cred2, c_kwargs["clients"].credential) diff --git a/tests/unit/task/test_validation.py b/tests/unit/task/test_validation.py deleted file mode 100755 index bddb820f5c..0000000000 --- a/tests/unit/task/test_validation.py +++ /dev/null @@ -1,217 +0,0 @@ -# Copyright 2014: Mirantis Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from rally.common.plugin import plugin -from rally.common import validation as common_validation -from rally.task import validation -from tests.unit import fakes -from tests.unit import test - - -class ValidationUtilsTestCase(test.TestCase): - - def setUp(self): - super(ValidationUtilsTestCase, self).setUp() - - class Plugin(plugin.Plugin): - pass - - Plugin._meta_init() - self.addCleanup(Plugin.unregister) - self.Plugin = Plugin - - def test_old_validator_admin(self): - - validator_func = mock.Mock() - validator_func.return_value = None - - validator = validation.validator(validator_func) - - self.assertEqual(self.Plugin, - validator("a", "b", "c", d=1)(self.Plugin)) - self.assertEqual(1, len(self.Plugin._meta_get("validators"))) - - vname, args, kwargs = self.Plugin._meta_get("validators")[0] - validator_cls = common_validation.Validator.get(vname) - validator_inst = validator_cls(*args, **kwargs) - fake_admin = fakes.fake_credential() - ctx = {"admin": {"credential": fake_admin}, "users": []} - result = validator_inst.validate(ctx, {}, None, None) - self.assertIsNone(result) - - validator_func.assert_called_once_with( - {}, None, mock.ANY, "a", "b", "c", d=1) - deployment = validator_func.call_args[0][2] - self.assertEqual({"admin": fake_admin, "users": []}, - deployment.get_credentials_for("openstack")) - - def test_old_validator_users(self): - - validator_func = mock.Mock() - validator_func.return_value = None - - validator = validation.validator(validator_func) - - self.assertEqual(self.Plugin, - validator("a", "b", "c", d=1)(self.Plugin)) - self.assertEqual(1, len(self.Plugin._meta_get("validators"))) - - vname, args, kwargs = self.Plugin._meta_get("validators")[0] - validator_cls = common_validation.Validator.get(vname) - validator_inst = validator_cls(*args, **kwargs) - fake_admin = fakes.fake_credential() - fake_users1 = fakes.fake_credential() - fake_users2 = fakes.fake_credential() - users = [{"credential": fake_users1}, 
{"credential": fake_users2}] - ctx = {"admin": {"credential": fake_admin}, "users": users} - result = validator_inst.validate(ctx, {}, None, None) - self.assertIsNone(result) - - fake_users1.clients.assert_called_once_with() - fake_users2.clients.assert_called_once_with() - validator_func.assert_has_calls(( - mock.call({}, fake_users1.clients.return_value, mock.ANY, - "a", "b", "c", d=1), - mock.call({}, fake_users2.clients.return_value, mock.ANY, - "a", "b", "c", d=1) - )) - for args in validator_func.call_args: - deployment = validator_func.call_args[0][2] - self.assertEqual({"admin": fake_admin, - "users": [fake_users1, fake_users2]}, - deployment.get_credentials_for("openstack")) - - def test_old_validator_users_error(self): - - validator_func = mock.Mock() - validator_func.return_value = validation.ValidationResult(False) - - validator = validation.validator(validator_func) - - self.assertEqual(self.Plugin, - validator("a", "b", "c", d=1)(self.Plugin)) - self.assertEqual(1, len(self.Plugin._meta_get("validators"))) - - vname, args, kwargs = self.Plugin._meta_get("validators")[0] - validator_cls = common_validation.Validator.get(vname) - validator_inst = validator_cls(*args, **kwargs) - fake_admin = fakes.fake_credential() - fake_users1 = fakes.fake_credential() - fake_users2 = fakes.fake_credential() - users = [{"credential": fake_users1}, {"credential": fake_users2}] - ctx = {"admin": {"credential": fake_admin}, "users": users} - self.assertRaises( - common_validation.ValidationError, - validator_inst.validate, ctx, {}, None, None) - - fake_users1.clients.assert_called_once_with() - fake_users2.clients.assert_called_once_with() - validator_func.assert_called_once_with( - {}, fake_users1.clients.return_value, mock.ANY, - "a", "b", "c", d=1) - deployment = validator_func.call_args[0][2] - self.assertEqual({"admin": fake_admin, - "users": [fake_users1, fake_users2]}, - deployment.get_credentials_for("openstack")) - - @mock.patch("rally.task.validation.LOG.warning") - def test_deprecated_validator(self, mock_log_warning): - - my_deprecated_validator = validation.deprecated_validator( - "new_validator", "deprecated_validator", "0.10.0") - self.Plugin = my_deprecated_validator("foo", bar="baz")(self.Plugin) - self.assertEqual([("new_validator", ("foo",), {"bar": "baz"})], - self.Plugin._meta_get("validators")) - mock_log_warning.assert_called_once_with(mock.ANY) - - def _unwrap_validator(self, validator, *args, **kwargs): - name = self.id() - - @plugin.base() - class Foo(plugin.Plugin, - validation.validation.ValidatablePluginMixin): - pass - - @plugin.configure(name) - class TempPlugin(Foo): - pass - - self.addCleanup(TempPlugin.unregister) - - validator(*args, **kwargs)(TempPlugin) - - def wrap_validator(config): - return (Foo.validate(name, {}, config, {}) or []) - - return wrap_validator - - def test_share_proto_compatibility(self): - validator = self._unwrap_validator( - validation.validate_share_proto) - res = validator({"args": {"share_proto": "GLUSTERFS"}}) - self.assertEqual(0, len(res)) - - res = validator({"args": {"share_proto": "fake"}}) - self.assertEqual(1, len(res)) - self.assertEqual("share_proto is fake which is not a valid value from " - "['nfs', 'cifs', 'glusterfs', 'hdfs', 'cephfs']", - res[0]) - - @mock.patch("rally.common.yamlutils.safe_load") - @mock.patch("rally.plugins.openstack.validators.os.access") - @mock.patch("rally.plugins.openstack.validators.open") - def test_workbook_contains_workflow_compatibility( - self, mock_open, mock_access, mock_safe_load): - 
mock_safe_load.return_value = { - "version": "2.0", - "name": "wb", - "workflows": { - "wf1": { - "type": "direct", - "tasks": { - "t1": { - "action": "std.noop" - } - } - } - } - } - - validator = self._unwrap_validator( - validation.workbook_contains_workflow, "definition", - "workflow_name") - context = { - "args": { - "definition": "fake_path1", - "workflow_name": "wf1" - } - } - - validator(context) - self.assertEqual(1, mock_open.called) - self.assertEqual(1, mock_access.called) - self.assertEqual(1, mock_safe_load.called) - - def test_validation_result(self): - self.assertEqual("validation success", - str(validation.ValidationResult(True))) - self.assertEqual("my msg", - str(validation.ValidationResult(False, "my msg"))) - self.assertEqual("---------- Exception in validator ----------\ntb\n", - str(validation.ValidationResult(False, "my msg", - etype=Exception, - etraceback="tb\n"))) diff --git a/tests/unit/test_api.py b/tests/unit/test_api.py index 652215d08f..0bb71b2b58 100644 --- a/tests/unit/test_api.py +++ b/tests/unit/test_api.py @@ -206,7 +206,7 @@ class TaskAPITestCase(test.TestCase): def test_render_template_include_other_template(self): other_template_path = os.path.join( os.path.dirname(__file__), - "..", "..", "samples/tasks/scenarios/nova/boot.json") + "..", "..", "samples/tasks/scenarios/dummy/dummy.json") template = "{%% include \"%s\" %%}" % os.path.basename( other_template_path) with open(other_template_path) as f: @@ -884,14 +884,6 @@ class DeploymentAPITestCase(BaseDeploymentTestCase): env.check_health.assert_called_once_with() self.assertFalse(env.get_info.called) - def test_service_list(self): - fake_credential = fakes.fake_credential() - deployment = mock.Mock(spec=objects.Deployment) - deployment.get_credentials_for.return_value = { - "admin": fake_credential, "users": []} - result = self.deployment_inst.service_list(deployment=deployment) - self.assertEqual(fake_credential.list_services.return_value, result) - class APITestCase(test.TestCase): @@ -1404,12 +1396,9 @@ class VerifierAPITestCase(test.TestCase): @mock.patch("rally.cli.commands.verify.logging.is_debug", return_value=False) - @mock.patch("rally.plugins.openstack.verification.tempest.manager." - "os.path.exists") @mock.patch("rally.api._Verifier._get") def test_configure_when_it_is_already_configured(self, mock___verifier__get, - mock_exists, mock_is_debug): verifier_obj = mock___verifier__get.return_value verifier_id = "uuiiiidd" @@ -1457,11 +1446,9 @@ class VerifierAPITestCase(test.TestCase): @mock.patch("rally.cli.commands.verify.logging.is_debug", return_value=True) - @mock.patch("rally.plugins.openstack.verification.tempest.manager." - "os.path.exists") @mock.patch("rally.api._Verifier._get") def test_configure_when_it_is_already_configured_with_logging( - self, mock___verifier__get, mock_exists, mock_is_debug): + self, mock___verifier__get, mock_is_debug): verifier_obj = mock___verifier__get.return_value verifier_id = "uuiiiidd" deployment_id = "deployment" @@ -1525,11 +1512,9 @@ class VerifierAPITestCase(test.TestCase): self.assertIn("because verifier %s is in '%s' status" % (verifier_obj, status), "%s" % e) - @mock.patch("rally.plugins.openstack.verification.tempest.manager." - "os.path.exists") @mock.patch("rally.api._Verifier._get") def test_override_config_when_it_is_already_configured( - self, mock___verifier__get, mock_exists): + self, mock___verifier__get): verifier_obj = mock___verifier__get.return_value verifier_id = "uuiiiidd" deployment_id = "deployment"