From 56baf73d0599e59d58627fcf2ab78786dc1608a3 Mon Sep 17 00:00:00 2001 From: Vagrant User Date: Wed, 15 Apr 2015 18:42:31 +0000 Subject: [PATCH 01/87] Prepare for X --- Dockerfile | 17 - README.md | 39 --- compose.yml | 11 - docker.yml | 8 - docs/spec/building_dependencies.md | 67 ---- docs/spec/current_flow.spec | 33 -- docs/spec/deployment_blocks.spec | 60 ---- docs/spec/how_to_do_primary.md | 18 -- docs/spec/inventory.spec | 14 - docs/spec/layers.spec | 8 - docs/spec/networking.spec | 81 ----- docs/spec/orchestration_in_fuel.yaml | 94 ------ docs/spec/questions | 52 --- examples/nodes_list.yaml | 9 - examples/resources/docker.yml | 11 - examples/resources/mariadb.yml | 16 - examples/resources/simple/docker/remove.yml | 4 - examples/resources/simple/docker/run.yml | 10 - examples/resources/simple/group_vars/all | 14 - examples/resources/simple/host_vars/first | 6 - examples/resources/simple/hosts | 20 -- examples/resources/simple/mariadb/remove.yml | 6 - examples/resources/simple/mariadb/run.yml | 6 - examples/resources/simple/mariadb/users.yml | 11 - examples/resources/simple/mariadb/wait.yml | 9 - examples/resources/simple/rabbitmq/remove.yml | 6 - examples/resources/simple/rabbitmq/run.yml | 6 - examples/resources/simple/remove.yml | 5 - examples/resources/simple/run.yml | 5 - examples/resources/simple/user/remove.yml | 12 - examples/resources/simple/user/run.yml | 6 - kolla.yml | 20 -- main.yml | 15 - solar/MANIFEST.in | 2 - solar/requirements.txt | 3 - solar/setup.py | 50 --- solar/solar/__init__.py | 0 solar/solar/cli.py | 105 ------ solar/solar/core/__init__.py | 0 solar/solar/core/extensions_manager.py | 17 - solar/solar/core/profile.py | 10 - solar/solar/errors.py | 10 - solar/solar/extensions/__init__.py | 37 --- solar/solar/extensions/base.py | 28 -- solar/solar/extensions/modules/__init__.py | 0 solar/solar/extensions/modules/ansible.py | 178 ---------- solar/solar/extensions/modules/discovery.py | 54 ---- solar/solar/extensions/modules/playbook.py | 11 - solar/solar/extensions/modules/resources.py | 26 -- solar/solar/interfaces/__init__.py | 0 solar/solar/interfaces/db/__init__.py | 9 - solar/solar/interfaces/db/file_system_db.py | 101 ------ solar/solar/templates/profile.yml | 22 -- solar/solar/third_party/__init__.py | 0 solar/solar/third_party/dir_dbm.py | 303 ------------------ solar/solar/utils.py | 61 ---- 56 files changed, 1726 deletions(-) delete mode 100644 Dockerfile delete mode 100644 README.md delete mode 100644 compose.yml delete mode 100644 docker.yml delete mode 100644 docs/spec/building_dependencies.md delete mode 100644 docs/spec/current_flow.spec delete mode 100644 docs/spec/deployment_blocks.spec delete mode 100644 docs/spec/how_to_do_primary.md delete mode 100644 docs/spec/inventory.spec delete mode 100644 docs/spec/layers.spec delete mode 100644 docs/spec/networking.spec delete mode 100644 docs/spec/orchestration_in_fuel.yaml delete mode 100644 docs/spec/questions delete mode 100644 examples/nodes_list.yaml delete mode 100644 examples/resources/docker.yml delete mode 100644 examples/resources/mariadb.yml delete mode 100644 examples/resources/simple/docker/remove.yml delete mode 100644 examples/resources/simple/docker/run.yml delete mode 100644 examples/resources/simple/group_vars/all delete mode 100644 examples/resources/simple/host_vars/first delete mode 100644 examples/resources/simple/hosts delete mode 100644 examples/resources/simple/mariadb/remove.yml delete mode 100644 examples/resources/simple/mariadb/run.yml delete mode 100644 
examples/resources/simple/mariadb/users.yml delete mode 100644 examples/resources/simple/mariadb/wait.yml delete mode 100644 examples/resources/simple/rabbitmq/remove.yml delete mode 100644 examples/resources/simple/rabbitmq/run.yml delete mode 100644 examples/resources/simple/remove.yml delete mode 100644 examples/resources/simple/run.yml delete mode 100644 examples/resources/simple/user/remove.yml delete mode 100644 examples/resources/simple/user/run.yml delete mode 100644 kolla.yml delete mode 100644 main.yml delete mode 100644 solar/MANIFEST.in delete mode 100644 solar/requirements.txt delete mode 100644 solar/setup.py delete mode 100644 solar/solar/__init__.py delete mode 100644 solar/solar/cli.py delete mode 100644 solar/solar/core/__init__.py delete mode 100644 solar/solar/core/extensions_manager.py delete mode 100644 solar/solar/core/profile.py delete mode 100644 solar/solar/errors.py delete mode 100644 solar/solar/extensions/__init__.py delete mode 100644 solar/solar/extensions/base.py delete mode 100644 solar/solar/extensions/modules/__init__.py delete mode 100644 solar/solar/extensions/modules/ansible.py delete mode 100644 solar/solar/extensions/modules/discovery.py delete mode 100644 solar/solar/extensions/modules/playbook.py delete mode 100644 solar/solar/extensions/modules/resources.py delete mode 100644 solar/solar/interfaces/__init__.py delete mode 100644 solar/solar/interfaces/db/__init__.py delete mode 100644 solar/solar/interfaces/db/file_system_db.py delete mode 100644 solar/solar/templates/profile.yml delete mode 100644 solar/solar/third_party/__init__.py delete mode 100644 solar/solar/third_party/dir_dbm.py delete mode 100644 solar/solar/utils.py diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index cf463228..00000000 --- a/Dockerfile +++ /dev/null @@ -1,17 +0,0 @@ -FROM debian:jessie -MAINTAINER Andrew Woodward awoodward@mirantis.com - -ENV DEBIAN_FRONTEND noninteractive - -RUN apt-get update && apt-get -y install --fix-missing \ - curl \ - ssh \ - ansible \ - python-pip - -ADD . /vagrant/ -WORKDIR /vagrant - -RUN ansible-playbook -i "localhost," -c local main.yml - -VOLUME /vagrant \ No newline at end of file diff --git a/README.md b/README.md deleted file mode 100644 index 3caca59e..00000000 --- a/README.md +++ /dev/null @@ -1,39 +0,0 @@ -# Setup development env -* Install virtualbox -* Install vagrant -* Setup environment -``` -$ cd fuel-ng -$ vagrant up -``` -* Login into vm, the code is available in /vagrant directory -``` -$ vagrant ssh -$ solar --help -``` - -## Solar usage -* discover nodes, with standard file based discovery -``` -solar discover -``` -* create profile (global config) -``` -solar profile --create --id prf1 --tags env/test_env -``` -* assign nodes to profile with tags -``` -# edit nodes files, in the future we want to provide -# some cli in order to change the data - -vim tmp/storage/nodes-id.yaml - -# add 'env/test_env' in tags list -``` -* in order to assign resouce to the node use the same the same - method, i.e. add in tags list for node your service e.g. 
- 'service/docker' -* perform deployment -``` -solar configure --profile prf1 -pa run -``` diff --git a/compose.yml b/compose.yml deleted file mode 100644 index 949b8492..00000000 --- a/compose.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- - -- hosts: all - sudo: yes - tasks: - - shell: docker-compose --version - register: compose - ignore_errors: true - - shell: curl -L https://github.com/docker/compose/releases/download/1.1.0/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose - when: compose|failed - - shell: chmod +x /usr/local/bin/docker-compose diff --git a/docker.yml b/docker.yml deleted file mode 100644 index f4c70229..00000000 --- a/docker.yml +++ /dev/null @@ -1,8 +0,0 @@ -- hosts: all - sudo: yes - tasks: - - shell: docker --version - ignore_errors: true - register: docker_version - - shell: curl -sSL https://get.docker.com/ | sudo sh - when: docker_version | failed diff --git a/docs/spec/building_dependencies.md b/docs/spec/building_dependencies.md deleted file mode 100644 index 2a96cd7b..00000000 --- a/docs/spec/building_dependencies.md +++ /dev/null @@ -1,67 +0,0 @@ - - -Problem: Different execution strategies ---------------------------------------- - -We will have different order of execution for different actions -(installation, removal, maintenance) - -1. Installation and removal of resources should be done in different order. -2. Running maintenance tasks may require completely different order -of actions, and this order can not be described one time for resources, -it should be described for each action. - -IMPORTANT: In such case resources are making very little sense, -because we need to define different dependencies and build different -executions graphs for tasks during lifecycle management - - -Dependency between resources ------------------------------ -Several options to manage ordering between executables - -1. Allow user to specify this order -2. Explicitly set requires/require_for in additional entity like profile -3. Deployment flow should reflect data-dependencies between resources - -1st option is pretty clear - and we should provide a way for user -to manage dependencies by himself -(even if they will lead to error during execution) - -2nd is similar to what is done in fuel, and allows explicitly set -what is expected to be executed. However we should -not hardcode those deps on resources/actions itself. Because it will lead to -tight-coupling, and some workarounds to skip unwanted resource execution. - -3rd option is manage dependencies based on what is provided by different -resources. For example input: some_service - -Please note that this format is used only to describe intentions. - -:: - image: - ref: - namespace: docker - value: base_image - -Practically it means that docker resource should be executed before -some_service. And if another_service needs to be connected to some_service - -:: - connect: - ref: - namespace: some_service - value: port - -But what if there is no data-dependencies? - -In such case we can add generic way to extend parameters with its -requirements, like: - -:: - - requires: - - ref: - namespace: node - -# (dshulyak) How to add backward dependency? (required_for) diff --git a/docs/spec/current_flow.spec b/docs/spec/current_flow.spec deleted file mode 100644 index 65b2cab6..00000000 --- a/docs/spec/current_flow.spec +++ /dev/null @@ -1,33 +0,0 @@ - - -1. Discovery (ansible all -m facter) -Read list of ips and store them, and search for different data on those -hosts - -2. Create environment ?? 
with profile, that provides roles (wraps resources)
-
-3. Add nodes to the env and distribute services
-
-Assign roles (partitions of services) from the profiles to the nodes.
-Store history of applied resources.
-Role only matters as an initial template.
-
-4. Change settings provided by a resource.
-
-Important/non-important settings ??
-We need defaults for some settings.
-Different templates ?? for different backends of resources ??
-
-5. Start management
-
-Periodically applying stuff ??
-
-6. Stop management
-
-We need to be able to stop things
-
-7. Run maintenance
-
-Resources should be added to history and the management graph will be changed
-
-8. Start management
diff --git a/docs/spec/deployment_blocks.spec b/docs/spec/deployment_blocks.spec
deleted file mode 100644
index 03a01ca5..00000000
--- a/docs/spec/deployment_blocks.spec
+++ /dev/null
@@ -1,60 +0,0 @@
-
-
-A profile is a global wrapper for all resources in an environment.
-A profile is versioned and executed by a particular driver.
-A profile is a container for resources.
-Resources can be grouped by role entities.
-
-::
-
-  id: HA
-  type: profile
-  version: 0.1
-  # adapter for any application that satisfies our requirements
-  driver: ansible
-
-
-A role is a logical wrapper of resources.
-We will provide "opinionated" wrappers, but the user should
-be able to compose resources in any way.
-
-::
-
-  roles:
-    - id: controller
-      type: role
-      resources: []
-
-
-A resource should have deployment logic for several events:
-main deployment, removal of the resource, scale-up of the resource ?
-A resource should have a list of input parameters that the resource provides.
-Resources are isolated, and should be executable as long as the
-required data is provided.
-
-::
-  id: rabbitmq
-  type: resource
-  driver: ansible_playbook
-  actions:
-    run: $install_rabbitmq_playbook
-  input:
-    image: fuel/rabbitmq
-    port: 5572
-    # we need to be able to select ip addresses
-    listen: [{{management.ip}}, {{public.ip}}]
-
-
-::
-  id: nova_compute
-  type: resource
-  driver: ansible_playbook
-  actions:
-    run: $link_to_ansible_playbook
-    remove: $link_to_another_playbook_that_will_migrate_vms
-    maintenance: $link_to_playbook_that_will_put_into_maintenance
-  input:
-    image: fuel/compute
-    driver: kvm
-    rabbitmq_hosts: []
-
diff --git a/docs/spec/how_to_do_primary.md b/docs/spec/how_to_do_primary.md
deleted file mode 100644
index d8ee2ea0..00000000
--- a/docs/spec/how_to_do_primary.md
+++ /dev/null
@@ -1,18 +0,0 @@
-
-How to approach primary / non-primary resource management?
----------------------------------------------------------
-
-It should be possible to avoid storing a primary/non-primary flag
-for any particular resource.
-
-In Ansible there is a way to execute a particular task from a playbook
-only once and on a concrete host.
-
-::
-
-  - hosts: [mariadb]
-    tasks:
-      - debug: msg="Installing first node"
-        run_once: true
-        delegate_to: groups['mariadb'][0]
-      - debug: msg="Installing all other mariadb nodes"
-        when: inventory_hostname != groups['mariadb'][0]
diff --git a/docs/spec/inventory.spec b/docs/spec/inventory.spec
deleted file mode 100644
index 9c5182f2..00000000
--- a/docs/spec/inventory.spec
+++ /dev/null
@@ -1,14 +0,0 @@
-
-The inventory mechanism should provide an easy way for the user to change any
-piece of the deployment configuration.
-
-It means several things:
-1. When writing modules, a developer should take into account the possibility
-of the user modifying them. Development may take a little bit longer, but we
-are developing a tool that will cover not a single particular use case
-but a broad range of customized production deployments.
-
-2. Each resource should define what is changeable.
-
-On the stage before deployment we will be able to know what resources
-are used on the level of node/cluster and modify them the way we want.
diff --git a/docs/spec/layers.spec b/docs/spec/layers.spec
deleted file mode 100644
index 75ee54d3..00000000
--- a/docs/spec/layers.spec
+++ /dev/null
@@ -1,8 +0,0 @@
-
-Layers
-
-1. REST API of our CORE service // managing basic information
-1.1. Extension API // interface for extensions
-2. Orchestration // run tasks, periodic tasks, lifecycle management ??
-3. Storage
-
diff --git a/docs/spec/networking.spec b/docs/spec/networking.spec
deleted file mode 100644
index 982349f3..00000000
--- a/docs/spec/networking.spec
+++ /dev/null
@@ -1,81 +0,0 @@
-
-We should make the network a separate resource for which we should be
-able to add custom handlers.
-
-This resource will actually serialize tasks, and provide inventory
-information.
-
-
-Input:
-
-Different entities in a custom database, like networks and nodes, maybe
-interfaces and other things.
-
-Another input is parameters, like ovs/linux (it may be parameters or
-different tasks)
-
-Output:
-
-
-A list of ansible tasks for the orchestrator to execute, like
-
-::
-
-  shell: ovs-vsctl add-br {{networks.management.bridge}}
-
-And data for the inventory
-
-
-Networking entities
------------------------
-
-A network can have a list of subnets that are attached to different node racks.
-
-Each subnet stores L3 parameters, such as cidr/ip ranges.
-L2 parameters such as vlans can be stored on the network.
-
-Roles should be attached to the network, and different subnets cannot
-be used as different roles per rack.
-
-How it should work:
-
-1. An untagged network is created with some L2 parameters like vlan
-2. A subnet is created for this network with params (10.0.0.0/24)
-3. The user attaches the network to a cluster with roles public/management/storage
-4. A role can also store L2 parameters (bridge, mtu)
-5. The user creates a rack and uses this subnet
-6. IPs are assigned for each node in this rack from each subnet
-7. During deployment we create bridges based on roles.
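To make the "Output" part above concrete, here is a rough sketch of how such a network resource could serialize role data into ansible shell tasks plus inventory data. This is not the actual solar implementation; the function name and field names are assumptions made for illustration only.

::

    # Hypothetical sketch: turn network-role data into ansible tasks
    # and inventory values. Names are illustrative, not real solar code.
    def serialize_network(network_roles):
        tasks = []
        inventory = {}
        for role in network_roles:
            # one bridge per network role, as in step 7 above
            tasks.append({'shell': 'ovs-vsctl add-br {0}'.format(role['bridge'])})
            if role.get('mtu'):
                tasks.append({'shell': 'ip link set {0} mtu {1}'.format(
                    role['bridge'], role['mtu'])})
            inventory[role['name']] = {'bridge': role['bridge'], 'ip': role['ip']}
        return tasks, inventory

    tasks, inventory = serialize_network([
        {'name': 'management', 'bridge': 'br-mgmt', 'mtu': 1500, 'ip': '10.0.0.2/24'}])
    print(tasks)
    print(inventory)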
- -URIs -------- - -/networks/ - -vlan -mtu - -/networks//subnets - -cidr -ip ranges -gateway - -/clusters//networks/ - -Subset of network attached to cluster - -/clusters//networks//network_roles - -Roles attached to particular network - -/network_roles/ - -bridge - -/clusters//racks//subnets - -/clusters//racks//nodes - - - diff --git a/docs/spec/orchestration_in_fuel.yaml b/docs/spec/orchestration_in_fuel.yaml deleted file mode 100644 index 7d260eac..00000000 --- a/docs/spec/orchestration_in_fuel.yaml +++ /dev/null @@ -1,94 +0,0 @@ -roles: - role-name: - name: "" - description: "" - conflicts: - - another_role - update_required: - - another_role - update_once: - - another_role - has_primary: true - limits: - min: int OR "<>" - overrides: - - condition: "<>" - max: 1 - - condition: "<>" - reccomended: 3 - message: "" - restrictions: - - condition: "<>" - message: "" - action: "hide" - fault_tolerance: "2%" - -task_groups: - #Stages - - id: stage_name - type: stage - requires: [another_stage] - #Groups - - id: task_group_name - type: group - role: [role_name] - requires: [stage_name_requirement] - required_for: [stage_name_complete_before] - parameters: - strategy: - type: one_by_one - #OR - type: parallel - amount: 6 #Optional concurency limit - -tasks: - - id: task_name_puppet - type: puppet - role: '*' #optional role to filter task on, used when in a pre or post deployment stage - groups: [task_group_name] - required_for: [task_name, stage_name] - requires: [task_name, task_group_name, stage_name] - condition: "<>" - parameters: - puppet_manifest: path_to_manifests - puppet_modules: path_to_modules - timeout: 3600 - cwd: / - test_pre: - cmd: bash style exec of command to run - test_post: - cmd: bash style exec of command to run - - #all have [roles|groups] and requires /// required_for - - id: task_name_shell - type: shell - parameters: - cmd: bash style exec - timeout: 180 - retries: 10 - interval: 2 - - - id: task_name_upload_file - type: upload_file - role: '*' - parameters: - path: /etc/hiera/nodes.yaml - - - id: task_name_sync - type: sync - role: '*' - parameters: - src: rsync://{MASTER_IP}:/puppet/version - dst: /etc/puppet - timeout: 180 - - - id: task_name_copy_files - type: copy_files - role: '*' - parameters: - files: - - src: source_file/{CLUSTER_ID}/ - dst: dest/localtion - permissions: '0600' - dir_permissions: '0700' - diff --git a/docs/spec/questions b/docs/spec/questions deleted file mode 100644 index f4cb9f9a..00000000 --- a/docs/spec/questions +++ /dev/null @@ -1,52 +0,0 @@ - -Entities ------------- -We clearly need orchestration entities like: -1. resources/roles/services/profiles - -Also we need inventory entities: -2. nodes/networks/ifaces/cluster/release ? - -Q: how to allow developer to extend this entities by modules? -Options: -1. Use completely schema-less data model -(i personally more comfortable with sql-like data models) -2. Dont allow anything except standart entities, if developer needs -to manage custom data - he can create its own micro-service and -then integrate it via custom type of resource -(one which perform query to third-part service) - - -Identities and namespaces ---------------------------- -Identities required for several reasons: -- allow reusage of created once entities -- provide clear api to operate with entities -- specify dependencies with identities - -Will be root namespace polluted with those entities? - -Options: -1. We can create some variable namespace explicitly -2. 
Or use something like namepsace/entity (example contrail/network) - - -Multiple options for configuration ----------------------------------- - -If there will be same parameters defined within different -modules, how this should behave? - -1. First option is concatenate several options and make a list of choices. -2. Raise a validation error that certain thing can be enabled with another. - -Looks like both should be supported. - - -Deployment code ----------------- - -We need to be able to expose all functionality of any -particular deployment tool. - -Current challenge: how to specify path to some deployment logic? diff --git a/examples/nodes_list.yaml b/examples/nodes_list.yaml deleted file mode 100644 index b5258a21..00000000 --- a/examples/nodes_list.yaml +++ /dev/null @@ -1,9 +0,0 @@ -- id: 6176aaa2-d97f-11e4-8dbe-080027c2ffdb - ip: 10.0.0.2 - ssh_user: vagrant - ssh_private_key_path: /vagrant/tmp/keys/ssh_private - -- id: cc48cf72-df88-11e4-9f5b-080027c2ffdb - ip: 10.0.0.3 - ssh_user: vagrant - ssh_private_key_path: /vagrant/tmp/keys/ssh_private diff --git a/examples/resources/docker.yml b/examples/resources/docker.yml deleted file mode 100644 index 4c4ca12f..00000000 --- a/examples/resources/docker.yml +++ /dev/null @@ -1,11 +0,0 @@ -id: docker -type: resource -handler: ansible -version: v1 -actions: - run: simple/docker/run.yml - remove: simple/docker/remove.yml -input: - base_image: ubuntu -tags: [service/docker] - diff --git a/examples/resources/mariadb.yml b/examples/resources/mariadb.yml deleted file mode 100644 index 0e250843..00000000 --- a/examples/resources/mariadb.yml +++ /dev/null @@ -1,16 +0,0 @@ -id: mariadb -type: resource -handler: ansible -version: v1 -actions: - run: simple/mariadb/run.yml - remove: simple/mariadb/remove.yml - wait: simple/mariadb/wait.yml - users: simple/mariadb/users.yml -input: - name: maria-test - image: tutum/mariadb - users: - - name: test1 - password: test1 -tags: [service/mariadb] diff --git a/examples/resources/simple/docker/remove.yml b/examples/resources/simple/docker/remove.yml deleted file mode 100644 index 0d02ace9..00000000 --- a/examples/resources/simple/docker/remove.yml +++ /dev/null @@ -1,4 +0,0 @@ -- hosts: [docker] - sudo: yes - tasks: - - shell: apt-get remove -y lxc-docker diff --git a/examples/resources/simple/docker/run.yml b/examples/resources/simple/docker/run.yml deleted file mode 100644 index 65f2889a..00000000 --- a/examples/resources/simple/docker/run.yml +++ /dev/null @@ -1,10 +0,0 @@ - -- hosts: [docker] - sudo: yes - tasks: - - shell: docker --version - ignore_errors: true - register: docker_version - - shell: curl -sSL https://get.docker.com/ | sudo sh - when: docker_version|failed - - shell: docker pull {{ docker.base_image }} diff --git a/examples/resources/simple/group_vars/all b/examples/resources/simple/group_vars/all deleted file mode 100644 index 0448d6ad..00000000 --- a/examples/resources/simple/group_vars/all +++ /dev/null @@ -1,14 +0,0 @@ -docker: - base_image: ubuntu - -rabbitmq: - image: tutum/rabbitmq - name: rabbit-test1 - -user: - name: test_name - password: test_pass - -mariadb: - name: maria-test - image: tutum/mariadb diff --git a/examples/resources/simple/host_vars/first b/examples/resources/simple/host_vars/first deleted file mode 100644 index eb566b39..00000000 --- a/examples/resources/simple/host_vars/first +++ /dev/null @@ -1,6 +0,0 @@ - -networks: - default: - ip: 10.0.0.2 - cidr: 10.0.0.0/24 - interface: eth1 diff --git a/examples/resources/simple/hosts b/examples/resources/simple/hosts 
deleted file mode 100644 index 79cd2cb4..00000000 --- a/examples/resources/simple/hosts +++ /dev/null @@ -1,20 +0,0 @@ - -first ansible_connection=local ansible_ssh_host=10.0.0.2 -second ansible_ssh_host=10.0.0.3 - -[docker] - -first -second - -[rabbitmq] - -first - -[user] - -first - -[mariadb] - -first diff --git a/examples/resources/simple/mariadb/remove.yml b/examples/resources/simple/mariadb/remove.yml deleted file mode 100644 index b1afd0e4..00000000 --- a/examples/resources/simple/mariadb/remove.yml +++ /dev/null @@ -1,6 +0,0 @@ - -- hosts: [mariadb] - sudo: yes - tasks: - - shell: docker stop {{ mariadb.name }} - - shell: docker rm {{ mariadb.name }} diff --git a/examples/resources/simple/mariadb/run.yml b/examples/resources/simple/mariadb/run.yml deleted file mode 100644 index 016fa036..00000000 --- a/examples/resources/simple/mariadb/run.yml +++ /dev/null @@ -1,6 +0,0 @@ - -- hosts: [mariadb] - sudo: yes - tasks: - - shell: docker run -d --net="host" --privileged \ - --name {{mariadb.name}} {{mariadb.image}} diff --git a/examples/resources/simple/mariadb/users.yml b/examples/resources/simple/mariadb/users.yml deleted file mode 100644 index 0a14d1c6..00000000 --- a/examples/resources/simple/mariadb/users.yml +++ /dev/null @@ -1,11 +0,0 @@ - -- hosts: [mariadb] - sudo: yes - tasks: - - command: docker exec -t {{mariadb.name}} \ - mysql -uroot \ - -e "CREATE USER '{{item.name}}'@'%' IDENTIFIED BY '{{item.password}}'" - with_items: mariadb.users - - command: docker exec -t {{mariadb.name}} \ - mysql -uroot -e "GRANT ALL PRIVILEGES ON *.* TO '{{item.name}}'@'%' WITH GRANT OPTION" - with_items: mariadb.users diff --git a/examples/resources/simple/mariadb/wait.yml b/examples/resources/simple/mariadb/wait.yml deleted file mode 100644 index 800c5b18..00000000 --- a/examples/resources/simple/mariadb/wait.yml +++ /dev/null @@ -1,9 +0,0 @@ - -- hosts: [mariadb] - sudo: yes - tasks: - - shell: docker exec -t {{mariadb.name}} mysql -uroot -e "select 1" - register: result - until: result.rc == 0 - retries: 10 - delay: 0.5 diff --git a/examples/resources/simple/rabbitmq/remove.yml b/examples/resources/simple/rabbitmq/remove.yml deleted file mode 100644 index 0191fcac..00000000 --- a/examples/resources/simple/rabbitmq/remove.yml +++ /dev/null @@ -1,6 +0,0 @@ - -- hosts: [rabbitmq] - sudo: yes - tasks: - - shell: docker stop {{ rabbitmq.name }} - - shell: docker rm {{ rabbitmq.name }} diff --git a/examples/resources/simple/rabbitmq/run.yml b/examples/resources/simple/rabbitmq/run.yml deleted file mode 100644 index 5df459e6..00000000 --- a/examples/resources/simple/rabbitmq/run.yml +++ /dev/null @@ -1,6 +0,0 @@ - -- hosts: [rabbitmq] - sudo: yes - tasks: - - shell: docker run --net="host" --privileged \ - --name {{ rabbitmq.name }} -d {{ rabbitmq.image }} diff --git a/examples/resources/simple/remove.yml b/examples/resources/simple/remove.yml deleted file mode 100644 index d178fe9c..00000000 --- a/examples/resources/simple/remove.yml +++ /dev/null @@ -1,5 +0,0 @@ - -- include: user/remove.yml -- include: rabbitmq/remove.yml -- include: mariadb/remove.yml -- include: docker/remove.yml diff --git a/examples/resources/simple/run.yml b/examples/resources/simple/run.yml deleted file mode 100644 index a5fce714..00000000 --- a/examples/resources/simple/run.yml +++ /dev/null @@ -1,5 +0,0 @@ - -- include: docker/run.yml -- include: rabbitmq/run.yml -- include: mariadb/run.yml -- include: user/run.yml diff --git a/examples/resources/simple/user/remove.yml b/examples/resources/simple/user/remove.yml deleted 
file mode 100644 index 954a9623..00000000 --- a/examples/resources/simple/user/remove.yml +++ /dev/null @@ -1,12 +0,0 @@ - -- hosts: [rabbitmq] - sudo: yes - tasks: - - shell: docker exec -i {{rabbitmq.name}} /usr/sbin/rabbitmqctl delete_user {{user.name}} - run_once: true - -- hosts: [mariadb] - sudo: yes - tasks: - - command: docker exec -t {{mariadb.name}} \ - mysql -uroot -e "DROP USER '{{user.name}}'" diff --git a/examples/resources/simple/user/run.yml b/examples/resources/simple/user/run.yml deleted file mode 100644 index c1fe60e4..00000000 --- a/examples/resources/simple/user/run.yml +++ /dev/null @@ -1,6 +0,0 @@ - -- hosts: [rabbitmq] - sudo: yes - tasks: - - command: docker exec -t {{rabbitmq.name}} /usr/sbin/rabbitmqctl add_user {{user.name}} {{user.password}} - run_once: true diff --git a/kolla.yml b/kolla.yml deleted file mode 100644 index b173cb62..00000000 --- a/kolla.yml +++ /dev/null @@ -1,20 +0,0 @@ - -- hosts: all - sudo: yes - tasks: - - git: repo=https://github.com/stackforge/kolla.git - - shell: sh kolla/tools/genenv - - shell: docker pull {{item}} - with_items: - - kollaglue/centos-rdo-glance-registry - - kollaglue/centos-rdo-glance-api - - kollaglue/centos-rdo-keystone - - kollaglue/centos-rdo-mariadb-data - - kollaglue/centos-rdo-mariadb-app - - kollaglue/centos-rdo-nova-conductor:latest - - kollaglue/centos-rdo-nova-api:latest - - kollaglue/centos-rdo-nova-scheduler:latest - - kollaglue/centos-rdo-nova-libvirt - - kollaglue/centos-rdo-nova-network - - kollaglue/centos-rdo-nova-compute - - kollaglue/centos-rdo-rabbitmq diff --git a/main.yml b/main.yml deleted file mode 100644 index bc40a5a1..00000000 --- a/main.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- - -- hosts: all - sudo: yes - tasks: - # Setup additional development tools - - apt: name=vim state=present - - apt: name=tmux state=present - - apt: name=htop state=present - - apt: name=python-virtualenv state=present - - apt: name=virtualenvwrapper state=present - - apt: name=ipython state=present - - apt: name=python-pudb state=present - # Setup development env for solar - - shell: python setup.py develop chdir=/vagrant/solar diff --git a/solar/MANIFEST.in b/solar/MANIFEST.in deleted file mode 100644 index 0a079c06..00000000 --- a/solar/MANIFEST.in +++ /dev/null @@ -1,2 +0,0 @@ -include *.txt -recursive-include solar/ * diff --git a/solar/requirements.txt b/solar/requirements.txt deleted file mode 100644 index 60fc0932..00000000 --- a/solar/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -six>=1.9.0 -pyyaml -jinja2 diff --git a/solar/setup.py b/solar/setup.py deleted file mode 100644 index 9907857c..00000000 --- a/solar/setup.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import os - -from setuptools import find_packages -from setuptools import setup - -def find_requires(): - prj_root = os.path.dirname(os.path.realpath(__file__)) - requirements = [] - with open(u'{0}/requirements.txt'.format(prj_root), 'r') as reqs: - requirements = reqs.readlines() - return requirements - - -setup( - name='solar', - version='0.0.1', - description='Deployment tool', - long_description="""Deployment tool""", - classifiers=[ - "Development Status :: 1 - Beta", - "License :: OSI Approved :: Apache Software License", - "Programming Language :: Python", - "Programming Language :: Python :: 2.6", - "Programming Language :: Python :: 2.7", - "Topic :: System :: Software Distribution"], - author='Mirantis Inc.', - author_email='product@mirantis.com', - url='http://mirantis.com', - keywords='deployment', - packages=find_packages(), - zip_safe=False, - install_requires=find_requires(), - include_package_data=True, - entry_points={ - 'console_scripts': [ - 'solar = solar.cli:main']}) diff --git a/solar/solar/__init__.py b/solar/solar/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/solar/solar/cli.py b/solar/solar/cli.py deleted file mode 100644 index 7b4d34ab..00000000 --- a/solar/solar/cli.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright 2015 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Solar CLI api - -On create "golden" resource should be moved to special place -""" - -import argparse -import subprocess -import os -import sys -import pprint - -import textwrap -import yaml - -from solar import utils -from solar import extensions -from solar.interfaces.db import get_db - -# NOTE: these are extensions, they shouldn't be imported here -from solar.extensions.modules import ansible -from solar.extensions.modules.discovery import Discovery - - -class Cmd(object): - - def __init__(self): - self.parser = argparse.ArgumentParser( - description=textwrap.dedent(__doc__), - formatter_class=argparse.RawDescriptionHelpFormatter) - self.subparser = self.parser.add_subparsers( - title='actions', - description='Supported actions', - help='Provide of one valid actions') - self.register_actions() - self.db = get_db() - - def parse(self, args): - parsed = self.parser.parse_args(args) - return parsed.func(parsed) - - def register_actions(self): - - parser = self.subparser.add_parser('discover') - parser.set_defaults(func=getattr(self, 'discover')) - - # Perform configuration - parser = self.subparser.add_parser('configure') - parser.set_defaults(func=getattr(self, 'configure')) - parser.add_argument( - '-p', - '--profile') - parser.add_argument( - '-a', - '--actions', - nargs='+') - parser.add_argument( - '-pa', - '--profile_action') - - # Profile actions - parser = self.subparser.add_parser('profile') - parser.set_defaults(func=getattr(self, 'profile')) - parser.add_argument('-l', '--list', dest='list', action='store_true') - group = parser.add_argument_group('create') - group.add_argument('-c', '--create', dest='create', action='store_true') - group.add_argument('-t', '--tags', nargs='+', default=['env/test_env']) - group.add_argument('-i', '--id', default=utils.generate_uuid()) - - def profile(self, args): - if args.create: - params = {'tags': args.tags, 'id': args.id} - profile_template_path = os.path.join(os.path.dirname(__file__), 'templates', 'profile.yml') - data = yaml.load(utils.render_template(profile_template_path, params)) - self.db.store('profiles', data) - else: - pprint.pprint(self.db.get_list('profiles')) - - def configure(self, args): - profile = self.db.get_record('profiles', args.profile) - extensions.find_by_provider_from_profile( - profile, 'configure').configure( - actions=args.actions, - profile_action=args.profile_action) - - def discover(self, args): - Discovery({'id': 'discovery'}).discover() - - - -def main(): - api = Cmd() - api.parse(sys.argv[1:]) diff --git a/solar/solar/core/__init__.py b/solar/solar/core/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/solar/solar/core/extensions_manager.py b/solar/solar/core/extensions_manager.py deleted file mode 100644 index 7d1f710a..00000000 --- a/solar/solar/core/extensions_manager.py +++ /dev/null @@ -1,17 +0,0 @@ -from solar import extensions -from solar import errors - - -class ExtensionsManager(object): - - def __init__(self, profile): - self.profile = profile - - def get_data(self, key): - """Finds data by extensions provider""" - providers = filter(lambda e: key in e.PROVIDES, extensions.get_all_extensions()) - - if not providers: - raise errors.CannotFindExtension('Cannot find extension which provides "{0}"'.format(key)) - - return getattr(providers[0](self.profile), key)() diff --git a/solar/solar/core/profile.py b/solar/solar/core/profile.py deleted file mode 100644 index f477aa28..00000000 --- a/solar/solar/core/profile.py +++ /dev/null @@ -1,10 +0,0 @@ - -class Profile(object): - 
- def __init__(self, profile): - self._profile = profile - self.tags = set(profile['tags']) - self.extensions = profile.get('extensions', []) - - def get(self, key): - return self._profile.get(key, None) diff --git a/solar/solar/errors.py b/solar/solar/errors.py deleted file mode 100644 index 022d8121..00000000 --- a/solar/solar/errors.py +++ /dev/null @@ -1,10 +0,0 @@ -class SolarError(Exception): - pass - - -class CannotFindID(SolarError): - pass - - -class CannotFindExtension(SolarError): - pass diff --git a/solar/solar/extensions/__init__.py b/solar/solar/extensions/__init__.py deleted file mode 100644 index 7374a5ca..00000000 --- a/solar/solar/extensions/__init__.py +++ /dev/null @@ -1,37 +0,0 @@ -import glob -import os - -from solar import utils -from solar.core.profile import Profile -from solar.extensions.base import BaseExtension -# Import all modules from the directory in order -# to make subclasses for extensions work -modules = glob.glob(os.path.join(os.path.dirname(__file__), 'modules', '*.py')) -[__import__('%s.%s' % ('modules', os.path.basename(f)[:-3]), locals(), globals()) for f in modules] - - -def get_all_extensions(): - return BaseExtension.__subclasses__() - - -def find_extension(id_, version): - extensions = filter( - lambda e: e.ID == id_ and e.VERSION == version, - get_all_extensions()) - - if not extensions: - return None - - return extensions[0] - - -def find_by_provider_from_profile(profile, provider): - profile_ = Profile(profile) - extensions = profile_.extensions - result = None - for ext in extensions: - result = find_extension(ext['id'], ext['version']) - if result: - break - - return result(profile_) diff --git a/solar/solar/extensions/base.py b/solar/solar/extensions/base.py deleted file mode 100644 index 2437dc95..00000000 --- a/solar/solar/extensions/base.py +++ /dev/null @@ -1,28 +0,0 @@ -from solar.interfaces.db import get_db - - -class BaseExtension(object): - - ID = None - NAME = None - PROVIDES = [] - - def __init__(self, profile, core_manager=None, config=None): - self.config = config or {} - self.uid = self.ID - self.db = get_db() - self.profile = profile - - from solar.core.extensions_manager import ExtensionsManager - self.core = core_manager or ExtensionsManager(self.profile) - - def prepare(self): - """Make some changes in database state.""" - - @property - def input(self): - return self.config.get('input', {}) - - @property - def output(self): - return self.config.get('output', {}) diff --git a/solar/solar/extensions/modules/__init__.py b/solar/solar/extensions/modules/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/solar/solar/extensions/modules/ansible.py b/solar/solar/extensions/modules/ansible.py deleted file mode 100644 index f311a032..00000000 --- a/solar/solar/extensions/modules/ansible.py +++ /dev/null @@ -1,178 +0,0 @@ -import os - -import subprocess -import yaml - -from solar import utils -from solar.extensions import base - -from jinja2 import Template - - -ANSIBLE_INVENTORY = """ -{% for node in nodes %} -{{node.name}} ansible_ssh_host={{node.ip}} ansible_connection=ssh ansible_ssh_user={{node.ssh_user}} ansible_ssh_private_key_file={{node.ssh_private_key_path}} -{% endfor %} - -{% for res in resources %} - [{{ res.id }}] - {% for node in nodes_mapping[res.id] %} - {{node['name']}} - {% endfor %} -{% endfor %} -""" - - -def playbook(resource_path, playbook_name): - resource_dir = os.path.dirname(resource_path) - return {'include': '{0}'.format( - os.path.join(resource_dir, playbook_name))} - - -class 
AnsibleOrchestration(base.BaseExtension): - - ID = 'ansible' - VERSION = '1.0.0' - PROVIDES = ['configure'] - - def __init__(self, *args, **kwargs): - super(AnsibleOrchestration, self).__init__(*args, **kwargs) - - self.nodes = self._get_nodes() - self.resources = self._get_resources_for_nodes(self.nodes) - - def _get_nodes(self): - nodes = [] - for node in self.core.get_data('nodes_resources'): - if self.profile.tags <= set(node.get('tags', [])): - nodes.append(node) - - return nodes - - def _get_resources_for_nodes(self, nodes): - """Retrieves resources which required for nodes deployment""" - resources = [] - - for node in nodes: - node_tags = set(node.get('tags', [])) - result_resources = self._get_resources_with_tags(node_tags) - resources.extend(result_resources) - - return dict((r['id'], r) for r in resources).values() - - def _get_resources_with_tags(self, tags): - resources = [] - for resource in self.core.get_data('resources'): - resource_tags = set(resource.get('tags', [])) - # If resource without tags, it means that it should - # not be assigned to any node - if not resource_tags: - continue - if resource_tags <= tags: - resources.append(resource) - - return resources - - @property - def inventory(self): - temp = Template(ANSIBLE_INVENTORY) - return temp.render( - nodes_mapping=self._make_nodes_services_mapping(), - resources=self.resources, - nodes=self.nodes) - - def _make_nodes_services_mapping(self): - mapping = {} - for resource in self.resources: - mapping[resource['id']] = self._get_nodes_for_resource(resource) - - return mapping - - def _get_nodes_for_resource(self, resource): - resource_tags = set(resource['tags']) - nodes = [] - for node in self.nodes: - if resource_tags <= set(node['tags']): - nodes.append(node) - - return nodes - - @property - def vars(self): - result = {} - - for res in self.resources: - compiled = Template( - utils.yaml_dump({res['id']: res.get('input', {})})) - compiled = yaml.load(compiled.render(**result)) - - result.update(compiled) - - return result - - def prepare_from_profile(self, profile_action): - - paths = self.profile.get(profile_action) - if paths is None: - raise Exception('Action %s not supported', profile_action) - - return self.prepare_many(paths) - - def prepare_many(self, paths): - - ansible_actions = [] - - for path in paths: - ansible_actions.extend(self.prepare_one(path)) - - return ansible_actions - - def prepare_one(self, path): - """ - :param path: docker.actions.run or openstack.action - """ - steps = path.split('.') - - if len(steps) < 2: - raise Exception('Path %s is not valid,' - ' should be atleast 2 items', path) - - resources = filter(lambda r: r['id'] == steps[0], self.resources) - # NOTE: If there are not resouces for this tags, just skip it - if not resources: - return [] - - resource = resources[0] - - action = resource - for step in steps[1:]: - action = action[step] - - result = [] - if isinstance(action, list): - for item in action: - result.append(playbook(resource['parent_path'], item)) - else: - result.append(playbook(resource['parent_path'], action)) - - return result - - def configure(self, profile_action='run', actions=None): - utils.create_dir('tmp/group_vars') - utils.write_to_file(self.inventory, 'tmp/hosts') - utils.yaml_dump_to(self.vars, 'tmp/group_vars/all') - - if actions: - prepared = self.prepare_many(actions) - elif profile_action: - prepared = self.prepare_from_profile(profile_action) - else: - raise Exception('Either profile_action ' - 'or actions should be provided.') - - 
utils.yaml_dump_to(prepared, 'tmp/main.yml') - - sub = subprocess.Popen( - ['ansible-playbook', '-i', 'tmp/hosts', 'tmp/main.yml'], - env=dict(os.environ, ANSIBLE_HOST_KEY_CHECKING='False')) - out, err = sub.communicate() diff --git a/solar/solar/extensions/modules/discovery.py b/solar/solar/extensions/modules/discovery.py deleted file mode 100644 index 27ca35dd..00000000 --- a/solar/solar/extensions/modules/discovery.py +++ /dev/null @@ -1,54 +0,0 @@ -import io -import os - -import yaml - -from solar.extensions import base - - -class Discovery(base.BaseExtension): - - VERSION = '1.0.0' - ID = 'discovery' - PROVIDES = ['nodes_resources'] - - COLLECTION_NAME = 'nodes' - - FILE_PATH = os.path.join( - # TODO(pkaminski): no way we need '..' here... - os.path.dirname(__file__), '..', '..', '..', '..', - 'examples', 'nodes_list.yaml') - - def discover(self): - nodes_to_store = [] - with io.open(self.FILE_PATH) as f: - nodes = yaml.load(f) - - for node in nodes: - exist_node = self.db.get_record(self.COLLECTION_NAME, node['id']) - if not exist_node: - node['tags'] = ['node/{0}'.format(node['id'])] - nodes_to_store.append(node) - - self.db.store_list(self.COLLECTION_NAME, nodes_to_store) - - def nodes_resources(self): - nodes_list = self.db.get_list(self.COLLECTION_NAME) - nodes_resources = [] - - for node in nodes_list: - node_resource = {} - node_resource['id'] = node['id'] - node_resource['name'] = node['id'] - node_resource['handler'] = 'data' - node_resource['type'] = 'resource' - node_resource['version'] = self.VERSION - node_resource['tags'] = node['tags'] - node_resource['output'] = node - node_resource['ip'] = node['ip'] - node_resource['ssh_user'] = node['ssh_user'] - node_resource['ssh_private_key_path'] = node['ssh_private_key_path'] - - nodes_resources.append(node_resource) - - return nodes_resources diff --git a/solar/solar/extensions/modules/playbook.py b/solar/solar/extensions/modules/playbook.py deleted file mode 100644 index 5a5c5aad..00000000 --- a/solar/solar/extensions/modules/playbook.py +++ /dev/null @@ -1,11 +0,0 @@ - -from solar.extensions import base - - -class Playbook(base.BaseExtension): - - ID = 'ansible_playbook' - VERSION = '1.0.0' - - def execute(self, action): - return self.config.get('actions', {}).get(action, []) diff --git a/solar/solar/extensions/modules/resources.py b/solar/solar/extensions/modules/resources.py deleted file mode 100644 index 17e56c5a..00000000 --- a/solar/solar/extensions/modules/resources.py +++ /dev/null @@ -1,26 +0,0 @@ -import os - -from solar import utils -from solar.extensions import base - - -class Resources(base.BaseExtension): - - VERSION = '1.0.0' - ID = 'resources' - PROVIDES = ['resources'] - - # Rewrite it to use golden resources from - # the storage - FILE_MASK = os.path.join( - # TODO(pkaminski): no way we need '..' here... 
- os.path.dirname(__file__), '..', '..', '..', '..', - 'examples', 'resources', '*.yml') - - def resources(self): - resources = [] - for file_path in utils.find_by_mask(self.FILE_MASK): - res = utils.yaml_load(file_path) - res['parent_path'] = file_path - resources.append(res) - return resources diff --git a/solar/solar/interfaces/__init__.py b/solar/solar/interfaces/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/solar/solar/interfaces/db/__init__.py b/solar/solar/interfaces/db/__init__.py deleted file mode 100644 index e633458d..00000000 --- a/solar/solar/interfaces/db/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -from solar.interfaces.db.file_system_db import FileSystemDB - -mapping = { - 'file_system': FileSystemDB -} - -def get_db(): - # Should be retrieved from config - return mapping['file_system']() diff --git a/solar/solar/interfaces/db/file_system_db.py b/solar/solar/interfaces/db/file_system_db.py deleted file mode 100644 index 0f96fba9..00000000 --- a/solar/solar/interfaces/db/file_system_db.py +++ /dev/null @@ -1,101 +0,0 @@ -from solar.third_party.dir_dbm import DirDBM - - -import os -from fnmatch import fnmatch -from copy import deepcopy - -import yaml - -from solar import utils -from solar import errors - - -def get_files(path, pattern): - for root, dirs, files in os.walk(path): - for file_name in files: - if fnmatch(file_name, pattern): - yield os.path.join(root, file_name) - - -class FileSystemDB(DirDBM): - RESOURCES_PATH = './schema/resources' - STORAGE_PATH = 'tmp/storage/' - - def __init__(self): - utils.create_dir(self.STORAGE_PATH) - super(FileSystemDB, self).__init__(self.STORAGE_PATH) - self.entities = {} - - def create_resource(self, resource, tags): - self.from_files(self.RESOURCES_PATH) - - resource_uid = '{0}_{1}'.format(resource, '_'.join(tags)) - data = deepcopy(self.get(resource)) - data['tags'] = tags - self[resource_uid] = data - - def get_copy(self, key): - return deepcopy(self[key]) - - def add(self, obj): - if 'id' in obj: - self.entities[obj['id']] = obj - - def store_from_file(self, file_path): - self.store(file_path) - - def store(self, collection, obj): - if 'id' in obj: - self[self._make_key(collection, obj['id'])] = obj - else: - raise errors.CannotFindID('Cannot find id for object {0}'.format(obj)) - - def store_list(self, collection, objs): - for obj in objs: - self.store(collection, obj) - - def get_list(self, collection): - collection_keys = filter( - lambda k: k.startswith('{0}-'.format(collection)), - self.keys()) - - return map(lambda k: self[k], collection_keys) - - def get_record(self, collection, _id): - key = self._make_key(collection, _id) - if key not in self: - return None - - return self[key] - - def _make_key(self, collection, _id): - return '{0}-{1}'.format(collection, _id) - - def add_resource(self, resource): - if 'id' in resource: - self.entities[resource['id']] = resource - - def get(self, resource_id): - return self.entities[resource_id] - - def from_files(self, path): - for file_path in get_files(path, '*.yml'): - with open(file_path) as f: - entity = f - - self.add_resource(entity) - - def _readFile(self, path): - return yaml.load(super(FileSystemDB, self)._readFile(path)) - - def _writeFile(self, path, data): - return super(FileSystemDB, self)._writeFile(path, utils.yaml_dump(data)) - - def _encode(self, key): - """Override method of the parent not to use base64 as a key for encoding""" - return key - - def _decode(self, key): - """Override method of the parent not to use base64 as a key for 
encoding""" - return key diff --git a/solar/solar/templates/profile.yml b/solar/solar/templates/profile.yml deleted file mode 100644 index dc241323..00000000 --- a/solar/solar/templates/profile.yml +++ /dev/null @@ -1,22 +0,0 @@ -id: {{id}} -type: profile - -extensions: - - id: file_discovery - version: '1.0.0' - - id: ansible - version: '1.0.0' - -tags: {{tags}} - -# NOTE(dshulyak) it is mandatory for some profiles to user graph based -# api to provide order of execution for different events -run: - - docker.actions.run - - mariadb.actions.run - - mariadb.actions.wait - - mariadb.actions.users - -remove: - - mariadb.actions.remove - - docker.actions.remove diff --git a/solar/solar/third_party/__init__.py b/solar/solar/third_party/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/solar/solar/third_party/dir_dbm.py b/solar/solar/third_party/dir_dbm.py deleted file mode 100644 index ba64c989..00000000 --- a/solar/solar/third_party/dir_dbm.py +++ /dev/null @@ -1,303 +0,0 @@ -# -*- test-case-name: twisted.test.test_dirdbm -*- -# -# Copyright (c) Twisted Matrix Laboratories. -# See LICENSE for details. - - - -""" -DBM-style interface to a directory. -Each key is stored as a single file. This is not expected to be very fast or -efficient, but it's good for easy debugging. -DirDBMs are *not* thread-safe, they should only be accessed by one thread at -a time. -No files should be placed in the working directory of a DirDBM save those -created by the DirDBM itself! -Maintainer: Itamar Shtull-Trauring -""" - - -import os -import types -import base64 -import glob - -try: - import cPickle as pickle -except ImportError: - import pickle - -try: - _open -except NameError: - _open = open - - -class DirDBM(object): - """A directory with a DBM interface. - - This class presents a hash-like interface to a directory of small, - flat files. It can only use strings as keys or values. - """ - - def __init__(self, name): - """ - @type name: str - @param name: Base path to use for the directory storage. - """ - self.dname = os.path.abspath(name) - if not os.path.isdir(self.dname): - os.mkdir(self.dname) - else: - # Run recovery, in case we crashed. we delete all files ending - # with ".new". Then we find all files who end with ".rpl". If a - # corresponding file exists without ".rpl", we assume the write - # failed and delete the ".rpl" file. If only a ".rpl" exist we - # assume the program crashed right after deleting the old entry - # but before renaming the replacement entry. - # - # NOTE: '.' is NOT in the base64 alphabet! - for f in glob.glob(os.path.join(self.dname, "*.new")): - os.remove(f) - replacements = glob.glob(os.path.join(self.dname, "*.rpl")) - for f in replacements: - old = f[:-4] - if os.path.exists(old): - os.remove(f) - else: - os.rename(f, old) - - def _encode(self, k): - """Encode a key so it can be used as a filename. - """ - # NOTE: '_' is NOT in the base64 alphabet! - return base64.encodestring(k).replace('\n', '_').replace("/", "-") - - def _decode(self, k): - """Decode a filename to get the key. - """ - return base64.decodestring(k.replace('_', '\n').replace("-", "/")) - - def _readFile(self, path): - """Read in the contents of a file. - - Override in subclasses to e.g. provide transparently encrypted dirdbm. - """ - f = _open(path, "rb") - s = f.read() - f.close() - return s - - def _writeFile(self, path, data): - """Write data to a file. - - Override in subclasses to e.g. provide transparently encrypted dirdbm. 
- """ - f = _open(path, "wb") - f.write(data) - f.flush() - f.close() - - def __len__(self): - """ - @return: The number of key/value pairs in this Shelf - """ - return len(os.listdir(self.dname)) - - def __setitem__(self, k, v): - """ - C{dirdbm[k] = v} - Create or modify a textfile in this directory - @type k: str - @param k: key to set - - @type v: str - @param v: value to associate with C{k} - """ - assert type(k) == types.StringType, "DirDBM key must be a string" - # NOTE: Can be not a string if _writeFile in the child is redefined - # assert type(v) == types.StringType, "DirDBM value must be a string" - k = self._encode(k) - - # we create a new file with extension .new, write the data to it, and - # if the write succeeds delete the old file and rename the new one. - old = os.path.join(self.dname, k) - if os.path.exists(old): - new = old + ".rpl" # replacement entry - else: - new = old + ".new" # new entry - try: - self._writeFile(new, v) - except: - os.remove(new) - raise - else: - if os.path.exists(old): os.remove(old) - os.rename(new, old) - - def __getitem__(self, k): - """ - C{dirdbm[k]} - Get the contents of a file in this directory as a string. - - @type k: str - @param k: key to lookup - - @return: The value associated with C{k} - @raise KeyError: Raised when there is no such key - """ - assert type(k) == types.StringType, "DirDBM key must be a string" - path = os.path.join(self.dname, self._encode(k)) - try: - return self._readFile(path) - except: - raise KeyError, k - - def __delitem__(self, k): - """ - C{del dirdbm[foo]} - Delete a file in this directory. - - @type k: str - @param k: key to delete - - @raise KeyError: Raised when there is no such key - """ - assert type(k) == types.StringType, "DirDBM key must be a string" - k = self._encode(k) - try: os.remove(os.path.join(self.dname, k)) - except (OSError, IOError): raise KeyError(self._decode(k)) - - def keys(self): - """ - @return: a C{list} of filenames (keys). - """ - return map(self._decode, os.listdir(self.dname)) - - def values(self): - """ - @return: a C{list} of file-contents (values). - """ - vals = [] - keys = self.keys() - for key in keys: - vals.append(self[key]) - return vals - - def items(self): - """ - @return: a C{list} of 2-tuples containing key/value pairs. - """ - items = [] - keys = self.keys() - for key in keys: - items.append((key, self[key])) - return items - - def has_key(self, key): - """ - @type key: str - @param key: The key to test - - @return: A true value if this dirdbm has the specified key, a faluse - value otherwise. - """ - assert type(key) == types.StringType, "DirDBM key must be a string" - key = self._encode(key) - return os.path.isfile(os.path.join(self.dname, key)) - - def setdefault(self, key, value): - """ - @type key: str - @param key: The key to lookup - - @param value: The value to associate with key if key is not already - associated with a value. - """ - if not self.has_key(key): - self[key] = value - return value - return self[key] - - def get(self, key, default = None): - """ - @type key: str - @param key: The key to lookup - - @param default: The value to return if the given key does not exist - - @return: The value associated with C{key} or C{default} if not - C{self.has_key(key)} - """ - if self.has_key(key): - return self[key] - else: - return default - - def __contains__(self, key): - """ - C{key in dirdbm} - @type key: str - @param key: The key to test - - @return: A true value if C{self.has_key(key)}, a false value otherwise. 
- """ - assert type(key) == types.StringType, "DirDBM key must be a string" - key = self._encode(key) - return os.path.isfile(os.path.join(self.dname, key)) - - def update(self, dict): - """ - Add all the key/value pairs in C{dict} to this dirdbm. Any conflicting - keys will be overwritten with the values from C{dict}. - @type dict: mapping - @param dict: A mapping of key/value pairs to add to this dirdbm. - """ - for key, val in dict.items(): - self[key]=val - - def copyTo(self, path): - """ - Copy the contents of this dirdbm to the dirdbm at C{path}. - - @type path: C{str} - @param path: The path of the dirdbm to copy to. If a dirdbm - exists at the destination path, it is cleared first. - - @rtype: C{DirDBM} - @return: The dirdbm this dirdbm was copied to. - """ - path = os.path.abspath(path) - assert path != self.dname - - d = self.__class__(path) - d.clear() - for k in self.keys(): - d[k] = self[k] - return d - - def clear(self): - """ - Delete all key/value pairs in this dirdbm. - """ - for k in self.keys(): - del self[k] - - def close(self): - """ - Close this dbm: no-op, for dbm-style interface compliance. - """ - - def getModificationTime(self, key): - """ - Returns modification time of an entry. - - @return: Last modification date (seconds since epoch) of entry C{key} - @raise KeyError: Raised when there is no such key - """ - assert type(key) == types.StringType, "DirDBM key must be a string" - path = os.path.join(self.dname, self._encode(key)) - if os.path.isfile(path): - return os.path.getmtime(path) - else: - raise KeyError, key diff --git a/solar/solar/utils.py b/solar/solar/utils.py deleted file mode 100644 index 246cad7d..00000000 --- a/solar/solar/utils.py +++ /dev/null @@ -1,61 +0,0 @@ -import io -import glob -import yaml -import logging -import os - -from uuid import uuid4 - -from jinja2 import Template - -logger = logging.getLogger(__name__) - - -def create_dir(dir_path): - logger.debug(u'Creating directory %s', dir_path) - if not os.path.isdir(dir_path): - os.makedirs(dir_path) - - -def yaml_load(file_path): - with io.open(file_path) as f: - result = yaml.load(f) - - return result - - -def yaml_dump(yaml_data): - return yaml.dump(yaml_data, default_flow_style=False) - - -def write_to_file(data, file_path): - with open(file_path, 'w') as f: - f.write(data) - - -def yaml_dump_to(data, file_path): - write_to_file(yaml_dump(data), file_path) - - -def find_by_mask(mask): - for file_path in glob.glob(mask): - yield os.path.abspath(file_path) - - -def load_by_mask(mask): - result = [] - for file_path in find_by_mask(mask): - result.append(yaml_load(file_path)) - - return result - - -def generate_uuid(): - return str(uuid4()) - - -def render_template(template_path, params): - with io.open(template_path) as f: - temp = Template(f.read()) - - return temp.render(**params) From a50f0933c7d07102d63935e6ac1ee57ef7a2c9dc Mon Sep 17 00:00:00 2001 From: Vagrant User Date: Wed, 15 Apr 2015 18:43:02 +0000 Subject: [PATCH 02/87] Init x --- README | 15 ++++++ __init__.py | 0 actions.py | 11 ++++ db.py | 11 ++++ handlers.py | 62 +++++++++++++++++++++++ resource.py | 73 +++++++++++++++++++++++++++ resources/data_container/meta.yaml | 10 ++++ resources/data_container/remove.yml | 6 +++ resources/data_container/run.yml | 6 +++ resources/docker/docker.yml | 10 ++++ resources/docker_container/meta.yaml | 9 ++++ resources/docker_container/remove.yml | 6 +++ resources/docker_container/run.yml | 6 +++ resources/file/meta.yaml | 8 +++ resources/file/remove.sh | 3 ++ resources/file/run.sh | 3 ++ 
resources/mariadb/meta.yaml | 9 ++++ resources/mariadb/remove.yml | 6 +++ resources/mariadb/run.yml | 6 +++ resources/mariadb_table/meta.yaml | 10 ++++ resources/mariadb_user/meta.yaml | 9 ++++ resources/ro_node/meta.yaml | 8 +++ signals.py | 34 +++++++++++++ 23 files changed, 321 insertions(+) create mode 100644 README create mode 100644 __init__.py create mode 100644 actions.py create mode 100644 db.py create mode 100644 handlers.py create mode 100644 resource.py create mode 100644 resources/data_container/meta.yaml create mode 100644 resources/data_container/remove.yml create mode 100644 resources/data_container/run.yml create mode 100644 resources/docker/docker.yml create mode 100644 resources/docker_container/meta.yaml create mode 100644 resources/docker_container/remove.yml create mode 100644 resources/docker_container/run.yml create mode 100644 resources/file/meta.yaml create mode 100644 resources/file/remove.sh create mode 100644 resources/file/run.sh create mode 100644 resources/mariadb/meta.yaml create mode 100644 resources/mariadb/remove.yml create mode 100644 resources/mariadb/run.yml create mode 100644 resources/mariadb_table/meta.yaml create mode 100644 resources/mariadb_user/meta.yaml create mode 100644 resources/ro_node/meta.yaml create mode 100644 signals.py diff --git a/README b/README new file mode 100644 index 00000000..60333668 --- /dev/null +++ b/README @@ -0,0 +1,15 @@ +Usage: +Creating resources: + +from x import resource +node1 = resource.create('node1', 'x/resources/ro_node/', 'rs/', {'ip':'10.0.0.3', 'ssh_key' : '/vagrant/tmp/keys/ssh_private', 'user':'vagrant'}) +node2 = resource.create('node2', 'x/resources/ro_node/', 'rs/', {'ip':'10.0.0.4', 'ssh_key' : '/vagrant/tmp/keys/ssh_private', 'user':'vagrant'}) +keystone_db_data = resource.create('mariadb_keystone_data', 'x/resources/data_container/', 'rs/', {'image' : 'mariadb', 'export_volumes' : ['/var/lib/mysql'], 'host': '', 'remote_user': '', 'ssh_key': ''}, connections={'host' : 'node2.ip', 'ssh_key':'node2.ssh_key', 'remote_user':'node2.user'}) +nova_db_data = resource.create('mariadb_nova_data', 'x/resources/data_container/', 'rs/', {'image' : 'mariadb', 'export_volumes' : ['/var/lib/mysql'], 'host': '', 'remote_user': '', 'ssh_key': ''}, connections={'host' : 'node1.ip', 'ssh_key':'node1.ssh_key', 'remote_user':'node1.user'}) + +to make connection after resource is created use signal.connect + +*** WARNNING *** +Resource DB is stored only in memory, if you close python interpretet you will lost it. +It can be recreated from resources but it's not done yet. +Connections are stored only in memory. 
It can be easly dumped as JSON file diff --git a/__init__.py b/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/actions.py b/actions.py new file mode 100644 index 00000000..93aa8a19 --- /dev/null +++ b/actions.py @@ -0,0 +1,11 @@ +# -*- coding: UTF-8 -*- +import handlers + +def resource_action(resource, action): + handler = resource.metadata['handler'] + handler = handlers.get(handler) + handler().action(resource, action) + +def tag_action(tag, action): + #TODO + pass diff --git a/db.py b/db.py new file mode 100644 index 00000000..5bb6b680 --- /dev/null +++ b/db.py @@ -0,0 +1,11 @@ +# -*- coding: UTF-8 -*- + +RESOURCE_DB = {} + +def resource_add(key, value): + if key in RESOURCE_DB: + raise Exception('Key `{0}` already exists'.format(key)) + RESOURCE_DB[key] = value + +def get_resource(key): + return RESOURCE_DB.get(key, None) diff --git a/handlers.py b/handlers.py new file mode 100644 index 00000000..e957403f --- /dev/null +++ b/handlers.py @@ -0,0 +1,62 @@ +# -*- coding: UTF-8 -*- +import os +import subprocess +import tempfile + +from jinja2 import Template + + +def get(handler_name): + handler = HANDLERS.get(handler_name, None) + if handler: + return handler + raise Exception('Handler {0} does not exist'.format(handler_name)) + + +class Ansible(object): + """TODO""" + def __init__(self): + pass + + def action(self, resource, action): + pass + + def _get_connection(self, resource): + return {'ssh_user': '', + 'ssh_key': '', + 'host': ''} + + def _create_inventory(self, dest_dir): + pass + + def _create_playbook(self, dest_dir): + pass + + +class Shell(object): + def __init__(self): + pass + + def action(self, resource, action): + action_file = resource.metadata['actions'][action] + action_file = os.path.join(resource.base_dir, action_file) + with open(action_file) as f: + tpl = Template(f.read()) + tpl = tpl.render(resource.args) + + tmp_file = tempfile.mkstemp(text=True)[1] + with open(tmp_file, 'w') as f: + f.write(tpl) + + subprocess.call(['bash', tmp_file]) + + +class Empty(object): + def action(self, resource, action): + pass + + +HANDLERS = {'ansible' : Ansible, + 'shell': Shell, + 'none': Empty} + diff --git a/resource.py b/resource.py new file mode 100644 index 00000000..198d0553 --- /dev/null +++ b/resource.py @@ -0,0 +1,73 @@ +# -*- coding: UTF-8 -*- +import os +import shutil + +import yaml + +import actions +import signals +import db + + +class Resource(object): + def __init__(self, name, metadata, args, base_dir): + self.name = name + self.base_dir = base_dir + self.metadata = metadata + self.actions = metadata['actions'].keys() if metadata['actions'] else None + self.requires = metadata['input'].keys() + self._validate_args(args) + self.args = args + self.changed = [] + + def __repr__(self): + return "Resource('name={0}', metadata={1}, args={2}, base_dir='{3}')".format(self.name, + self.metadata, + self.args, + self.base_dir) + + def update(self, args): + for key, value in args.iteritems(): + resource_key = self.args.get(key, None) + if resource_key: + self.args[key] = value + self.changed.append(key) + signals.notify(self, key, value) + + def action(self, action): + if action in self.actions: + actions.resource_action(self, action) + else: + raise Exception('Uuups, action is not available') + + def _validate_args(self, args): + for req in self.requires: + if not req in args: + raise Exception('Requirement `{0}` is missing in args'.format(req)) + + +def create(name, base_path, dest_path, args, connections={}): + if not os.path.exists(base_path): + raise 
Exception('Base resource does not exist: {0}'.format(dest_path)) + if not os.path.exists(dest_path): + raise Exception('Dest dir does not exist: {0}'.format(dest_path)) + if not os.path.isdir(dest_path): + raise Exception('Dest path is not a directory: {0}'.format(dest_path)) + + dest_path = os.path.join(dest_path, name) + base_meta_file = os.path.join(base_path, 'meta.yaml') + meta_file = os.path.join(dest_path, 'meta.yaml') + + meta = yaml.load(open(base_meta_file).read()) + meta['id'] = name + meta['version'] = '1.0.0' + + resource = Resource(name, meta, args, dest_path) + signals.assign_connections(resource, connections) + + #save + shutil.copytree(base_path, dest_path) + with open(meta_file, 'w') as f: + f.write(yaml.dump(meta)) + db.resource_add(name, resource) + return resource diff --git a/resources/data_container/meta.yaml b/resources/data_container/meta.yaml new file mode 100644 index 00000000..a688994e --- /dev/null +++ b/resources/data_container/meta.yaml @@ -0,0 +1,10 @@ +id: data_container +handler: ansible +version: 1.0.0 +actions: + run: run.yml + remove: remove.yml +input: + host: + image: + export_volumes: diff --git a/resources/data_container/remove.yml b/resources/data_container/remove.yml new file mode 100644 index 00000000..d3c3149f --- /dev/null +++ b/resources/data_container/remove.yml @@ -0,0 +1,6 @@ + +- hosts: [{{ ip }}] + sudo: yes + tasks: + - shell: docker stop {{ name }} + - shell: docker rm {{ name }} diff --git a/resources/data_container/run.yml b/resources/data_container/run.yml new file mode 100644 index 00000000..f3f601da --- /dev/null +++ b/resources/data_container/run.yml @@ -0,0 +1,6 @@ + +- hosts: [{{ ip }}] + sudo: yes + tasks: + - shell: docker run -d --net="host" --privileged \ + --name {{ name }} {{ image }} /bin/sh diff --git a/resources/docker/docker.yml b/resources/docker/docker.yml new file mode 100644 index 00000000..3b704056 --- /dev/null +++ b/resources/docker/docker.yml @@ -0,0 +1,10 @@ +id: docker +type: resource +handler: ansible +version: v1 +actions: + run: simple/docker/run.yml + remove: simple/docker/remove.yml +input: + base_image: ubuntu +tags: [n/1] diff --git a/resources/docker_container/meta.yaml b/resources/docker_container/meta.yaml new file mode 100644 index 00000000..a9aeded9 --- /dev/null +++ b/resources/docker_container/meta.yaml @@ -0,0 +1,9 @@ +id: container +handler: ansible +version: 1.0.0 +actions: + run: run.yml + remove: remove.yml +input: + image: + volume_binds: diff --git a/resources/docker_container/remove.yml b/resources/docker_container/remove.yml new file mode 100644 index 00000000..d3c3149f --- /dev/null +++ b/resources/docker_container/remove.yml @@ -0,0 +1,6 @@ + +- hosts: [{{ ip }}] + sudo: yes + tasks: + - shell: docker stop {{ name }} + - shell: docker rm {{ name }} diff --git a/resources/docker_container/run.yml b/resources/docker_container/run.yml new file mode 100644 index 00000000..90ae50dc --- /dev/null +++ b/resources/docker_container/run.yml @@ -0,0 +1,6 @@ + +- hosts: [{{ ip }}] + sudo: yes + tasks: + - shell: docker run -d --net="host" --privileged \ + --name {{ name }} {{ image }} diff --git a/resources/file/meta.yaml b/resources/file/meta.yaml new file mode 100644 index 00000000..30d78dfc --- /dev/null +++ b/resources/file/meta.yaml @@ -0,0 +1,8 @@ +id: file +handler: shell +version: 1.0.0 +actions: + run: run.sh + remove: remove.sh +input: + path: /tmp/test_file diff --git a/resources/file/remove.sh b/resources/file/remove.sh new file mode 100644 index 00000000..dc21c836 --- /dev/null +++ 
b/resources/file/remove.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +rm {{ path }} diff --git a/resources/file/run.sh b/resources/file/run.sh new file mode 100644 index 00000000..461a550e --- /dev/null +++ b/resources/file/run.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +touch {{ path }} diff --git a/resources/mariadb/meta.yaml b/resources/mariadb/meta.yaml new file mode 100644 index 00000000..b55dd2e2 --- /dev/null +++ b/resources/mariadb/meta.yaml @@ -0,0 +1,9 @@ +id: mariadb +handler: ansible +version: 1.0.0 +actions: + run: run.yml + remove: remove.yml +input: + image: tutum/mariadq +tags: [n/1] diff --git a/resources/mariadb/remove.yml b/resources/mariadb/remove.yml new file mode 100644 index 00000000..d3c3149f --- /dev/null +++ b/resources/mariadb/remove.yml @@ -0,0 +1,6 @@ + +- hosts: [{{ ip }}] + sudo: yes + tasks: + - shell: docker stop {{ name }} + - shell: docker rm {{ name }} diff --git a/resources/mariadb/run.yml b/resources/mariadb/run.yml new file mode 100644 index 00000000..90ae50dc --- /dev/null +++ b/resources/mariadb/run.yml @@ -0,0 +1,6 @@ + +- hosts: [{{ ip }}] + sudo: yes + tasks: + - shell: docker run -d --net="host" --privileged \ + --name {{ name }} {{ image }} diff --git a/resources/mariadb_table/meta.yaml b/resources/mariadb_table/meta.yaml new file mode 100644 index 00000000..40b92c47 --- /dev/null +++ b/resources/mariadb_table/meta.yaml @@ -0,0 +1,10 @@ +id: mariadb_user +handler: ansible +version: 1.0.0 +actions: + run: run.yml + remove: remove.yml +input: + name: name + password: password + users: [] diff --git a/resources/mariadb_user/meta.yaml b/resources/mariadb_user/meta.yaml new file mode 100644 index 00000000..db859484 --- /dev/null +++ b/resources/mariadb_user/meta.yaml @@ -0,0 +1,9 @@ +id: mariadb_user +handler: ansible +version: 1.0.0 +actions: + run: run.yml + remove: remove.yml +input: + name: name + password: password diff --git a/resources/ro_node/meta.yaml b/resources/ro_node/meta.yaml new file mode 100644 index 00000000..3e9dc663 --- /dev/null +++ b/resources/ro_node/meta.yaml @@ -0,0 +1,8 @@ +id: mariadb +handler: none +version: 1.0.0 +actions: +input: + ip: + ssh_key: + user: diff --git a/signals.py b/signals.py new file mode 100644 index 00000000..adf26622 --- /dev/null +++ b/signals.py @@ -0,0 +1,34 @@ +# -*- coding: UTF-8 -*- +from collections import defaultdict + +import db + +CLIENTS = defaultdict(lambda: defaultdict(list)) + +def connect(emitter, reciver, mappings): + for src, dst in mappings: + CLIENTS[emitter.name][src].append((reciver.name, dst)) + +def notify(source, key, value): + if key in CLIENTS[source.name]: + for client, r_key in CLIENTS[source.name][key]: + resource = db.get_resource(client) + if resource: + resource.update({r_key: value}) + else: + #XXX resource deleted? 
+ pass + +def assign_connections(reciver, connections): + mappings = defaultdict(list) + for key, dest in connections.iteritems(): + resource, r_key = dest.split('.') + resource = db.get_resource(resource) + value = resource.args[r_key] + reciver.args[key] = value + mappings[resource].append((r_key, key)) + for resource, r_mappings in mappings.iteritems(): + connect(resource, reciver, r_mappings) + + + From bf0e8a344f43bef02663c7c328b8a35f4d37f73e Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Thu, 16 Apr 2015 08:49:33 +0200 Subject: [PATCH 03/87] Fix paths to match the README example --- __init__.py => x/__init__.py | 0 actions.py => x/actions.py | 0 db.py => x/db.py | 0 handlers.py => x/handlers.py | 0 resource.py => x/resource.py | 0 {resources => x/resources}/data_container/meta.yaml | 0 {resources => x/resources}/data_container/remove.yml | 0 {resources => x/resources}/data_container/run.yml | 0 {resources => x/resources}/docker/docker.yml | 0 {resources => x/resources}/docker_container/meta.yaml | 0 {resources => x/resources}/docker_container/remove.yml | 0 {resources => x/resources}/docker_container/run.yml | 0 {resources => x/resources}/file/meta.yaml | 0 {resources => x/resources}/file/remove.sh | 0 {resources => x/resources}/file/run.sh | 0 {resources => x/resources}/mariadb/meta.yaml | 0 {resources => x/resources}/mariadb/remove.yml | 0 {resources => x/resources}/mariadb/run.yml | 0 {resources => x/resources}/mariadb_table/meta.yaml | 0 {resources => x/resources}/mariadb_user/meta.yaml | 0 {resources => x/resources}/ro_node/meta.yaml | 0 signals.py => x/signals.py | 0 22 files changed, 0 insertions(+), 0 deletions(-) rename __init__.py => x/__init__.py (100%) rename actions.py => x/actions.py (100%) rename db.py => x/db.py (100%) rename handlers.py => x/handlers.py (100%) rename resource.py => x/resource.py (100%) rename {resources => x/resources}/data_container/meta.yaml (100%) rename {resources => x/resources}/data_container/remove.yml (100%) rename {resources => x/resources}/data_container/run.yml (100%) rename {resources => x/resources}/docker/docker.yml (100%) rename {resources => x/resources}/docker_container/meta.yaml (100%) rename {resources => x/resources}/docker_container/remove.yml (100%) rename {resources => x/resources}/docker_container/run.yml (100%) rename {resources => x/resources}/file/meta.yaml (100%) rename {resources => x/resources}/file/remove.sh (100%) rename {resources => x/resources}/file/run.sh (100%) rename {resources => x/resources}/mariadb/meta.yaml (100%) rename {resources => x/resources}/mariadb/remove.yml (100%) rename {resources => x/resources}/mariadb/run.yml (100%) rename {resources => x/resources}/mariadb_table/meta.yaml (100%) rename {resources => x/resources}/mariadb_user/meta.yaml (100%) rename {resources => x/resources}/ro_node/meta.yaml (100%) rename signals.py => x/signals.py (100%) diff --git a/__init__.py b/x/__init__.py similarity index 100% rename from __init__.py rename to x/__init__.py diff --git a/actions.py b/x/actions.py similarity index 100% rename from actions.py rename to x/actions.py diff --git a/db.py b/x/db.py similarity index 100% rename from db.py rename to x/db.py diff --git a/handlers.py b/x/handlers.py similarity index 100% rename from handlers.py rename to x/handlers.py diff --git a/resource.py b/x/resource.py similarity index 100% rename from resource.py rename to x/resource.py diff --git a/resources/data_container/meta.yaml b/x/resources/data_container/meta.yaml similarity index 100% rename from 
resources/data_container/meta.yaml rename to x/resources/data_container/meta.yaml diff --git a/resources/data_container/remove.yml b/x/resources/data_container/remove.yml similarity index 100% rename from resources/data_container/remove.yml rename to x/resources/data_container/remove.yml diff --git a/resources/data_container/run.yml b/x/resources/data_container/run.yml similarity index 100% rename from resources/data_container/run.yml rename to x/resources/data_container/run.yml diff --git a/resources/docker/docker.yml b/x/resources/docker/docker.yml similarity index 100% rename from resources/docker/docker.yml rename to x/resources/docker/docker.yml diff --git a/resources/docker_container/meta.yaml b/x/resources/docker_container/meta.yaml similarity index 100% rename from resources/docker_container/meta.yaml rename to x/resources/docker_container/meta.yaml diff --git a/resources/docker_container/remove.yml b/x/resources/docker_container/remove.yml similarity index 100% rename from resources/docker_container/remove.yml rename to x/resources/docker_container/remove.yml diff --git a/resources/docker_container/run.yml b/x/resources/docker_container/run.yml similarity index 100% rename from resources/docker_container/run.yml rename to x/resources/docker_container/run.yml diff --git a/resources/file/meta.yaml b/x/resources/file/meta.yaml similarity index 100% rename from resources/file/meta.yaml rename to x/resources/file/meta.yaml diff --git a/resources/file/remove.sh b/x/resources/file/remove.sh similarity index 100% rename from resources/file/remove.sh rename to x/resources/file/remove.sh diff --git a/resources/file/run.sh b/x/resources/file/run.sh similarity index 100% rename from resources/file/run.sh rename to x/resources/file/run.sh diff --git a/resources/mariadb/meta.yaml b/x/resources/mariadb/meta.yaml similarity index 100% rename from resources/mariadb/meta.yaml rename to x/resources/mariadb/meta.yaml diff --git a/resources/mariadb/remove.yml b/x/resources/mariadb/remove.yml similarity index 100% rename from resources/mariadb/remove.yml rename to x/resources/mariadb/remove.yml diff --git a/resources/mariadb/run.yml b/x/resources/mariadb/run.yml similarity index 100% rename from resources/mariadb/run.yml rename to x/resources/mariadb/run.yml diff --git a/resources/mariadb_table/meta.yaml b/x/resources/mariadb_table/meta.yaml similarity index 100% rename from resources/mariadb_table/meta.yaml rename to x/resources/mariadb_table/meta.yaml diff --git a/resources/mariadb_user/meta.yaml b/x/resources/mariadb_user/meta.yaml similarity index 100% rename from resources/mariadb_user/meta.yaml rename to x/resources/mariadb_user/meta.yaml diff --git a/resources/ro_node/meta.yaml b/x/resources/ro_node/meta.yaml similarity index 100% rename from resources/ro_node/meta.yaml rename to x/resources/ro_node/meta.yaml diff --git a/signals.py b/x/signals.py similarity index 100% rename from signals.py rename to x/signals.py From 7f59729b4b4f27fdd8d9d2e4fc51bbdd80d558bc Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Thu, 16 Apr 2015 09:14:56 +0200 Subject: [PATCH 04/87] Actions are in separate directory This makes meta.yaml simpler --- x/actions.py | 1 + x/db.py | 2 ++ x/handlers.py | 2 +- x/resource.py | 8 +++++++- x/resources/data_container/{ => actions}/remove.yml | 0 x/resources/data_container/{ => actions}/run.yml | 0 x/resources/data_container/meta.yaml | 3 --- x/resources/docker_container/{ => actions}/remove.yml | 0 x/resources/docker_container/{ => actions}/run.yml | 0 
x/resources/docker_container/meta.yaml | 3 --- x/resources/file/{ => actions}/remove.sh | 0 x/resources/file/{ => actions}/run.sh | 0 x/resources/file/meta.yaml | 3 --- x/resources/mariadb/{ => actions}/remove.yml | 0 x/resources/mariadb/{ => actions}/run.yml | 0 x/resources/mariadb/meta.yaml | 3 --- x/signals.py | 4 ++++ 17 files changed, 15 insertions(+), 14 deletions(-) rename x/resources/data_container/{ => actions}/remove.yml (100%) rename x/resources/data_container/{ => actions}/run.yml (100%) rename x/resources/docker_container/{ => actions}/remove.yml (100%) rename x/resources/docker_container/{ => actions}/run.yml (100%) rename x/resources/file/{ => actions}/remove.sh (100%) rename x/resources/file/{ => actions}/run.sh (100%) rename x/resources/mariadb/{ => actions}/remove.yml (100%) rename x/resources/mariadb/{ => actions}/run.yml (100%) diff --git a/x/actions.py b/x/actions.py index 93aa8a19..93285133 100644 --- a/x/actions.py +++ b/x/actions.py @@ -1,6 +1,7 @@ # -*- coding: UTF-8 -*- import handlers + def resource_action(resource, action): handler = resource.metadata['handler'] handler = handlers.get(handler) diff --git a/x/db.py b/x/db.py index 5bb6b680..90df6dfe 100644 --- a/x/db.py +++ b/x/db.py @@ -2,10 +2,12 @@ RESOURCE_DB = {} + def resource_add(key, value): if key in RESOURCE_DB: raise Exception('Key `{0}` already exists'.format(key)) RESOURCE_DB[key] = value + def get_resource(key): return RESOURCE_DB.get(key, None) diff --git a/x/handlers.py b/x/handlers.py index e957403f..a6c17e17 100644 --- a/x/handlers.py +++ b/x/handlers.py @@ -56,7 +56,7 @@ class Empty(object): pass -HANDLERS = {'ansible' : Ansible, +HANDLERS = {'ansible': Ansible, 'shell': Shell, 'none': Empty} diff --git a/x/resource.py b/x/resource.py index 198d0553..9c319cda 100644 --- a/x/resource.py +++ b/x/resource.py @@ -42,7 +42,7 @@ class Resource(object): def _validate_args(self, args): for req in self.requires: - if not req in args: + if req not in args: raise Exception('Requirement `{0}` is missing in args'.format(req)) @@ -57,10 +57,16 @@ def create(name, base_path, dest_path, args, connections={}): dest_path = os.path.join(dest_path, name) base_meta_file = os.path.join(base_path, 'meta.yaml') meta_file = os.path.join(dest_path, 'meta.yaml') + actions_path = os.path.join(base_path, 'actions') meta = yaml.load(open(base_meta_file).read()) meta['id'] = name meta['version'] = '1.0.0' + meta['actions'] = {} + + if os.path.exists(actions_path): + for f in os.listdir(actions_path): + meta['actions'][os.path.splitext(f)[0]] = f resource = Resource(name, meta, args, dest_path) signals.assign_connections(resource, connections) diff --git a/x/resources/data_container/remove.yml b/x/resources/data_container/actions/remove.yml similarity index 100% rename from x/resources/data_container/remove.yml rename to x/resources/data_container/actions/remove.yml diff --git a/x/resources/data_container/run.yml b/x/resources/data_container/actions/run.yml similarity index 100% rename from x/resources/data_container/run.yml rename to x/resources/data_container/actions/run.yml diff --git a/x/resources/data_container/meta.yaml b/x/resources/data_container/meta.yaml index a688994e..8b4f3459 100644 --- a/x/resources/data_container/meta.yaml +++ b/x/resources/data_container/meta.yaml @@ -1,9 +1,6 @@ id: data_container handler: ansible version: 1.0.0 -actions: - run: run.yml - remove: remove.yml input: host: image: diff --git a/x/resources/docker_container/remove.yml b/x/resources/docker_container/actions/remove.yml similarity 
index 100% rename from x/resources/docker_container/remove.yml rename to x/resources/docker_container/actions/remove.yml diff --git a/x/resources/docker_container/run.yml b/x/resources/docker_container/actions/run.yml similarity index 100% rename from x/resources/docker_container/run.yml rename to x/resources/docker_container/actions/run.yml diff --git a/x/resources/docker_container/meta.yaml b/x/resources/docker_container/meta.yaml index a9aeded9..4170f334 100644 --- a/x/resources/docker_container/meta.yaml +++ b/x/resources/docker_container/meta.yaml @@ -1,9 +1,6 @@ id: container handler: ansible version: 1.0.0 -actions: - run: run.yml - remove: remove.yml input: image: volume_binds: diff --git a/x/resources/file/remove.sh b/x/resources/file/actions/remove.sh similarity index 100% rename from x/resources/file/remove.sh rename to x/resources/file/actions/remove.sh diff --git a/x/resources/file/run.sh b/x/resources/file/actions/run.sh similarity index 100% rename from x/resources/file/run.sh rename to x/resources/file/actions/run.sh diff --git a/x/resources/file/meta.yaml b/x/resources/file/meta.yaml index 30d78dfc..14eb2e2c 100644 --- a/x/resources/file/meta.yaml +++ b/x/resources/file/meta.yaml @@ -1,8 +1,5 @@ id: file handler: shell version: 1.0.0 -actions: - run: run.sh - remove: remove.sh input: path: /tmp/test_file diff --git a/x/resources/mariadb/remove.yml b/x/resources/mariadb/actions/remove.yml similarity index 100% rename from x/resources/mariadb/remove.yml rename to x/resources/mariadb/actions/remove.yml diff --git a/x/resources/mariadb/run.yml b/x/resources/mariadb/actions/run.yml similarity index 100% rename from x/resources/mariadb/run.yml rename to x/resources/mariadb/actions/run.yml diff --git a/x/resources/mariadb/meta.yaml b/x/resources/mariadb/meta.yaml index b55dd2e2..8fcdb87c 100644 --- a/x/resources/mariadb/meta.yaml +++ b/x/resources/mariadb/meta.yaml @@ -1,9 +1,6 @@ id: mariadb handler: ansible version: 1.0.0 -actions: - run: run.yml - remove: remove.yml input: image: tutum/mariadq tags: [n/1] diff --git a/x/signals.py b/x/signals.py index adf26622..7e76d11a 100644 --- a/x/signals.py +++ b/x/signals.py @@ -3,12 +3,15 @@ from collections import defaultdict import db + CLIENTS = defaultdict(lambda: defaultdict(list)) + def connect(emitter, reciver, mappings): for src, dst in mappings: CLIENTS[emitter.name][src].append((reciver.name, dst)) + def notify(source, key, value): if key in CLIENTS[source.name]: for client, r_key in CLIENTS[source.name][key]: @@ -19,6 +22,7 @@ def notify(source, key, value): #XXX resource deleted? 
pass + def assign_connections(reciver, connections): mappings = defaultdict(list) for key, dest in connections.iteritems(): From 2e82c6036c4d9691f81ed2a4afc560644c3c8c3c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?= Date: Thu, 16 Apr 2015 08:34:17 +0000 Subject: [PATCH 05/87] todo --- TODO | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 TODO diff --git a/TODO b/TODO new file mode 100644 index 00000000..9f317770 --- /dev/null +++ b/TODO @@ -0,0 +1,6 @@ +- Å‚aczymy automatycznie po nazwach +- graf jest budowany z CLIENTS, CLIENTS zapisywane jako JSON +- tagi trzymane w resource +- ansible handler (loles) +- cli +- szablony configow From 8f4ec0bab76f333546861a0b1db99eb8785e485a Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Thu, 16 Apr 2015 11:34:18 +0200 Subject: [PATCH 06/87] CLIENTS and Resources are saved and loaded --- config.yaml | 1 + x/resource.py | 13 +++++++++++++ x/signals.py | 17 ++++++++++++++++- x/utils.py | 42 ++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 72 insertions(+), 1 deletion(-) create mode 100644 config.yaml create mode 100644 x/utils.py diff --git a/config.yaml b/config.yaml new file mode 100644 index 00000000..29183d5b --- /dev/null +++ b/config.yaml @@ -0,0 +1 @@ +clients-data-file: /vagrant/clients.json diff --git a/x/resource.py b/x/resource.py index 9c319cda..5722c238 100644 --- a/x/resource.py +++ b/x/resource.py @@ -8,6 +8,8 @@ import actions import signals import db +from x import utils + class Resource(object): def __init__(self, name, metadata, args, base_dir): @@ -63,6 +65,7 @@ def create(name, base_path, dest_path, args, connections={}): meta['id'] = name meta['version'] = '1.0.0' meta['actions'] = {} + meta['input'] = args if os.path.exists(actions_path): for f in os.listdir(actions_path): @@ -77,3 +80,13 @@ def create(name, base_path, dest_path, args, connections={}): f.write(yaml.dump(meta)) db.resource_add(name, resource) return resource + + +def load(dest_path): + meta_file = os.path.join(dest_path, 'meta.yaml') + meta = utils.load_file(meta_file) + name = meta['id'] + args = meta['input'] + + return Resource(name, meta, args, dest_path) + diff --git a/x/signals.py b/x/signals.py index 7e76d11a..409280dc 100644 --- a/x/signals.py +++ b/x/signals.py @@ -3,14 +3,21 @@ from collections import defaultdict import db +from x import utils -CLIENTS = defaultdict(lambda: defaultdict(list)) + +CLIENTS_CONFIG_KEY = 'clients-data-file' +CLIENTS = utils.read_config_file(CLIENTS_CONFIG_KEY) def connect(emitter, reciver, mappings): for src, dst in mappings: + CLIENTS.setdefault(emitter.name, {}) + CLIENTS[emitter.name].setdefault(src, []) CLIENTS[emitter.name][src].append((reciver.name, dst)) + utils.save_to_config_file(CLIENTS_CONFIG_KEY, CLIENTS) + def notify(source, key, value): if key in CLIENTS[source.name]: @@ -35,4 +42,12 @@ def assign_connections(reciver, connections): connect(resource, reciver, r_mappings) +def connection_graph(): + resource_dependencies = {} + for source, destinations in CLIENTS.items(): + resource_dependencies[source] = [ + destination[0] for destination in destinations + ] + + return resource_dependencies diff --git a/x/utils.py b/x/utils.py new file mode 100644 index 00000000..04b9873f --- /dev/null +++ b/x/utils.py @@ -0,0 +1,42 @@ +import json +import os +import yaml + + +def ext_encoder(fpath): + ext = os.path.splitext(os.path.basename(fpath))[1].strip('.') + if ext in ['json']: + return json + elif ext in ['yaml', 'yml']: + return yaml + + raise Exception('Unknown extension 
{}'.format(ext)) + + +def load_file(fpath): + encoder = ext_encoder(fpath) + + try: + with open(fpath) as f: + return encoder.load(f) + except IOError: + return {} + + +def read_config(): + return load_file('/vagrant/config.yaml') + + +def read_config_file(key): + fpath = read_config()[key] + + return load_file(fpath) + + +def save_to_config_file(key, data): + fpath = read_config()[key] + + with open(fpath, 'w') as f: + encoder = ext_encoder(fpath) + encoder.dump(data, f) + From b064ef5a18399cede4a2ed2d2949b1c5ea268b70 Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Thu, 16 Apr 2015 12:03:59 +0200 Subject: [PATCH 07/87] Fixed resource saving, option to load all resources --- README => README.md | 45 ++++++++++++++++++++++++++++++++++++++++----- x/resource.py | 16 +++++++++++++++- x/signals.py | 1 + 3 files changed, 56 insertions(+), 6 deletions(-) rename README => README.md (58%) diff --git a/README b/README.md similarity index 58% rename from README rename to README.md index 60333668..854776ad 100644 --- a/README +++ b/README.md @@ -1,15 +1,50 @@ Usage: Creating resources: +``` from x import resource node1 = resource.create('node1', 'x/resources/ro_node/', 'rs/', {'ip':'10.0.0.3', 'ssh_key' : '/vagrant/tmp/keys/ssh_private', 'user':'vagrant'}) + node2 = resource.create('node2', 'x/resources/ro_node/', 'rs/', {'ip':'10.0.0.4', 'ssh_key' : '/vagrant/tmp/keys/ssh_private', 'user':'vagrant'}) + keystone_db_data = resource.create('mariadb_keystone_data', 'x/resources/data_container/', 'rs/', {'image' : 'mariadb', 'export_volumes' : ['/var/lib/mysql'], 'host': '', 'remote_user': '', 'ssh_key': ''}, connections={'host' : 'node2.ip', 'ssh_key':'node2.ssh_key', 'remote_user':'node2.user'}) + nova_db_data = resource.create('mariadb_nova_data', 'x/resources/data_container/', 'rs/', {'image' : 'mariadb', 'export_volumes' : ['/var/lib/mysql'], 'host': '', 'remote_user': '', 'ssh_key': ''}, connections={'host' : 'node1.ip', 'ssh_key':'node1.ssh_key', 'remote_user':'node1.user'}) +``` -to make connection after resource is created use signal.connect +to make connection after resource is created use `signal.connect` -*** WARNNING *** -Resource DB is stored only in memory, if you close python interpretet you will lost it. -It can be recreated from resources but it's not done yet. -Connections are stored only in memory. It can be easly dumped as JSON file +To test notifications: + +``` +keystone_db_data.args # displays node2 IP + +node2.update({'ip': '10.0.0.5'}) + +keystone_db_data.args # updated IP +``` + +If you close the Python shell you can load the resources like this: + +``` +from x import resource + +node1 = resource.load('rs/node1') + +node2 = resource.load('rs/node2') + +keystone_db_data = resource.load('rs/mariadn_keystone_data') + +nova_db_data = resource.load('rs/mariadb_nova_data') +``` + +Connections are loaded automatically. 
+ + +You can also load all resources at once: + +``` +from x import resource + +all_resources = resource.load_all('rs') +``` diff --git a/x/resource.py b/x/resource.py index 5722c238..391804f2 100644 --- a/x/resource.py +++ b/x/resource.py @@ -88,5 +88,19 @@ def load(dest_path): name = meta['id'] args = meta['input'] - return Resource(name, meta, args, dest_path) + resource = Resource(name, meta, args, dest_path) + db.resource_add(name, resource) + + return resource + + +def load_all(dest_path): + ret = {} + + for name in os.listdir(dest_path): + resource_path = os.path.join(dest_path, name) + resource = load(resource_path) + ret[resource.name] = resource + + return ret diff --git a/x/signals.py b/x/signals.py index 409280dc..08e10661 100644 --- a/x/signals.py +++ b/x/signals.py @@ -20,6 +20,7 @@ def connect(emitter, reciver, mappings): def notify(source, key, value): + CLIENTS.setdefault(source.name, []) if key in CLIENTS[source.name]: for client, r_key in CLIENTS[source.name][key]: resource = db.get_resource(client) From f246c07311bb25f74fad2a1a9cdd195354a3c66c Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Thu, 16 Apr 2015 12:48:53 +0200 Subject: [PATCH 08/87] Networkx added, graph created from CLIENTS --- TODO | 6 ------ TODO.md | 10 ++++++++++ requirements.txt | 2 ++ x/signals.py | 27 ++++++++++++++++++++++----- 4 files changed, 34 insertions(+), 11 deletions(-) delete mode 100644 TODO create mode 100644 TODO.md create mode 100644 requirements.txt diff --git a/TODO b/TODO deleted file mode 100644 index 9f317770..00000000 --- a/TODO +++ /dev/null @@ -1,6 +0,0 @@ -- Å‚aczymy automatycznie po nazwach -- graf jest budowany z CLIENTS, CLIENTS zapisywane jako JSON -- tagi trzymane w resource -- ansible handler (loles) -- cli -- szablony configow diff --git a/TODO.md b/TODO.md new file mode 100644 index 00000000..601e669d --- /dev/null +++ b/TODO.md @@ -0,0 +1,10 @@ +# TODO + +- Å‚Ä…czymy automatycznie po nazwach (pkaminski) +- tagi trzymane w resource +- ansible handler (loles) +- cli +- szablony configow + +# DONE +- graf jest budowany z CLIENTS, CLIENTS zapisywane jako JSON (pkaminski) diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 00000000..0330efe9 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,2 @@ +click==4.0 +networkx==1.9.1 diff --git a/x/signals.py b/x/signals.py index 08e10661..2877900c 100644 --- a/x/signals.py +++ b/x/signals.py @@ -1,5 +1,7 @@ # -*- coding: UTF-8 -*- from collections import defaultdict +import itertools +import networkx as nx import db @@ -46,9 +48,24 @@ def assign_connections(reciver, connections): def connection_graph(): resource_dependencies = {} - for source, destinations in CLIENTS.items(): - resource_dependencies[source] = [ - destination[0] for destination in destinations - ] + for source, destination_values in CLIENTS.items(): + resource_dependencies.setdefault(source, set()) + for src, destinations in destination_values.items(): + resource_dependencies[source].update([ + destination[0] for destination in destinations + ]) - return resource_dependencies + g = nx.DiGraph() + + # TODO: tags as graph node attributes + for source, destinations in resource_dependencies.items(): + g.add_node(source) + g.add_nodes_from(destinations) + g.add_edges_from( + itertools.izip( + itertools.repeat(source), + destinations + ) + ) + + return g From 46a00a1347af2057732534acec2d61903b0771b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?= Date: Thu, 16 Apr 2015 11:26:48 +0000 Subject: [PATCH 09/87] Translate todo to 
english --- TODO.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/TODO.md b/TODO.md index 601e669d..3c757b15 100644 --- a/TODO.md +++ b/TODO.md @@ -1,10 +1,10 @@ # TODO -- Å‚Ä…czymy automatycznie po nazwach (pkaminski) -- tagi trzymane w resource +- connections are made automaticly(pkaminski) +- tags are kept in resource mata file - ansible handler (loles) - cli -- szablony configow +- config templates # DONE -- graf jest budowany z CLIENTS, CLIENTS zapisywane jako JSON (pkaminski) +- graph is build from CLIENT dict, clients are stored in JSON file (pkaminski) From aea078c18c04d346ed42c45150709e21c168eec1 Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Thu, 16 Apr 2015 13:55:15 +0200 Subject: [PATCH 10/87] Guessing of resource keys, automatic connection --- README.md | 8 ++--- x/resources/data_container/meta.yaml | 2 +- x/resources/ro_node/meta.yaml | 2 +- x/signals.py | 44 ++++++++++++++++++++++++++-- 4 files changed, 48 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 854776ad..15176b25 100644 --- a/README.md +++ b/README.md @@ -3,13 +3,13 @@ Creating resources: ``` from x import resource -node1 = resource.create('node1', 'x/resources/ro_node/', 'rs/', {'ip':'10.0.0.3', 'ssh_key' : '/vagrant/tmp/keys/ssh_private', 'user':'vagrant'}) +node1 = resource.create('node1', 'x/resources/ro_node/', 'rs/', {'ip':'10.0.0.3', 'ssh_key' : '/vagrant/tmp/keys/ssh_private', 'ssh_user':'vagrant'}) -node2 = resource.create('node2', 'x/resources/ro_node/', 'rs/', {'ip':'10.0.0.4', 'ssh_key' : '/vagrant/tmp/keys/ssh_private', 'user':'vagrant'}) +node2 = resource.create('node2', 'x/resources/ro_node/', 'rs/', {'ip':'10.0.0.4', 'ssh_key' : '/vagrant/tmp/keys/ssh_private', 'ssh_user':'vagrant'}) -keystone_db_data = resource.create('mariadb_keystone_data', 'x/resources/data_container/', 'rs/', {'image' : 'mariadb', 'export_volumes' : ['/var/lib/mysql'], 'host': '', 'remote_user': '', 'ssh_key': ''}, connections={'host' : 'node2.ip', 'ssh_key':'node2.ssh_key', 'remote_user':'node2.user'}) +keystone_db_data = resource.create('mariadb_keystone_data', 'x/resources/data_container/', 'rs/', {'image' : 'mariadb', 'export_volumes' : ['/var/lib/mysql'], 'host': '', 'ssh_user': '', 'ssh_key': ''}, connections={'host' : 'node2.ip', 'ssh_key':'node2.ssh_key', 'ssh_user':'node2.ssh_user'}) -nova_db_data = resource.create('mariadb_nova_data', 'x/resources/data_container/', 'rs/', {'image' : 'mariadb', 'export_volumes' : ['/var/lib/mysql'], 'host': '', 'remote_user': '', 'ssh_key': ''}, connections={'host' : 'node1.ip', 'ssh_key':'node1.ssh_key', 'remote_user':'node1.user'}) +nova_db_data = resource.create('mariadb_nova_data', 'x/resources/data_container/', 'rs/', {'image' : 'mariadb', 'export_volumes' : ['/var/lib/mysql'], 'host': '', 'ssh_user': '', 'ssh_key': ''}, connections={'host' : 'node1.ip', 'ssh_key':'node1.ssh_key', 'ssh_user':'node1.ssh_user'}) ``` to make connection after resource is created use `signal.connect` diff --git a/x/resources/data_container/meta.yaml b/x/resources/data_container/meta.yaml index 8b4f3459..d4185a1a 100644 --- a/x/resources/data_container/meta.yaml +++ b/x/resources/data_container/meta.yaml @@ -2,6 +2,6 @@ id: data_container handler: ansible version: 1.0.0 input: - host: + ip: image: export_volumes: diff --git a/x/resources/ro_node/meta.yaml b/x/resources/ro_node/meta.yaml index 3e9dc663..1ceaa0fc 100644 --- a/x/resources/ro_node/meta.yaml +++ b/x/resources/ro_node/meta.yaml @@ -5,4 +5,4 @@ actions: input: ip: ssh_key: - user: + 
ssh_user: diff --git a/x/signals.py b/x/signals.py index 2877900c..a9542fd4 100644 --- a/x/signals.py +++ b/x/signals.py @@ -12,11 +12,51 @@ CLIENTS_CONFIG_KEY = 'clients-data-file' CLIENTS = utils.read_config_file(CLIENTS_CONFIG_KEY) -def connect(emitter, reciver, mappings): +def guess_mappings(emitter, receiver): + """Guess connection mapping between emitter and receiver. + + Suppose emitter and receiver have inputs: + ip, ssh_key, ssh_user + + Then we return a connection mapping like this: + + { + 'ip': '.ip', + 'ssh_key': '.ssh_key', + 'ssh_user': '.ssh_user' + } + + If receiver accepts inputs that are not present in emitter, + error is thrown -- such cases require manual intervention. + + :param emitter: + :param receiver: + :return: + """ + + ret = {} + + diff = set(receiver.requires).difference(emitter.requires) + if diff: + raise Exception( + 'The following inputs are not provided by emitter: {}.' + 'You need to set the connection manually.'.format(diff) + ) + + for key in receiver.requires: + ret[key] = '{}.{}'.format(emitter.name, key) + + return ret + + +def connect(emitter, receiver, mappings=None): + if mappings is None: + mappings = guess_mappings(emitter, receiver) + for src, dst in mappings: CLIENTS.setdefault(emitter.name, {}) CLIENTS[emitter.name].setdefault(src, []) - CLIENTS[emitter.name][src].append((reciver.name, dst)) + CLIENTS[emitter.name][src].append((receiver.name, dst)) utils.save_to_config_file(CLIENTS_CONFIG_KEY, CLIENTS) From a4e0fa580e419a592db7e6e4d1d0eff3af4a99fc Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Thu, 16 Apr 2015 18:36:59 +0200 Subject: [PATCH 11/87] Basic CLI implementation with Click --- README.md | 32 +++++++++++++++++- cli.py | 85 ++++++++++++++++++++++++++++++++++++++++++++++++ requirements.txt | 2 ++ x/resource.py | 5 +-- x/signals.py | 10 +++--- 5 files changed, 126 insertions(+), 8 deletions(-) create mode 100644 cli.py diff --git a/README.md b/README.md index 15176b25..4cbddf53 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,10 @@ -Usage: +## Usage: + Creating resources: ``` from x import resource + node1 = resource.create('node1', 'x/resources/ro_node/', 'rs/', {'ip':'10.0.0.3', 'ssh_key' : '/vagrant/tmp/keys/ssh_private', 'ssh_user':'vagrant'}) node2 = resource.create('node2', 'x/resources/ro_node/', 'rs/', {'ip':'10.0.0.4', 'ssh_key' : '/vagrant/tmp/keys/ssh_private', 'ssh_user':'vagrant'}) @@ -48,3 +50,31 @@ from x import resource all_resources = resource.load_all('rs') ``` + +## CLI + +You can do the above from the command-line client: + +``` +cd /vagrant + +python cli.py resource create node1 x/resources/ro_node/ rs/ '{"ip":"10.0.0.3", "ssh_key" : "/vagrant/tmp/keys/ssh_private", "ssh_user":"vagrant"}' + +python cli.py resource create node2 x/resources/ro_node/ rs/ '{"ip":"10.0.0.4", "ssh_key" : "/vagrant/tmp/keys/ssh_private", "ssh_user":"vagrant"}' + +python cli.py resource create mariadb_keystone_data x/resources/data_container/ rs/ '{"image": "mariadb", "export_volumes" : ["/var/lib/mysql"], "host": "", "ssh_user": "", "ssh_key": ""}' + +python cli.py resource create mariadb_nova_data x/resources/data_container/ rs/ '{"image" : "mariadb", "export_volumes" : ["/var/lib/mysql"], "host": "", "ssh_user": "", "ssh_key": ""}' + +# View resources +python cli.py resource show rs/mariadb_keystone_data + +# Connect resources +python cli.py connect rs/mariadb_keystone_data rs/node2 --mapping '{"host" : "node2.ip", "ssh_key":"node2.ssh_key", "ssh_user":"node2.ssh_user"}' + +python cli.py connect rs/mariadb_nova_data rs/node1 
--mapping '{"host" : "node1.ip", "ssh_key":"node1.ssh_key", "ssh_user":"node1.ssh_user"}' + +# View connections +python cli.py connections show +python cli.py connections graph +``` diff --git a/cli.py b/cli.py new file mode 100644 index 00000000..ce803853 --- /dev/null +++ b/cli.py @@ -0,0 +1,85 @@ +import click +import json +import networkx as nx + +from x import resource as xr +from x import signals as xs + + +@click.group() +def cli(): + pass + + +def init_cli_resource(): + @click.group() + def resource(): + pass + + cli.add_command(resource) + + @click.command() + @click.argument('name') + @click.argument('base_path') + @click.argument('dest_path') + @click.argument('args') + def create(args, dest_path, base_path, name): + print 'create', name, base_path, dest_path, args + args = json.loads(args) + xr.create(name, base_path, dest_path, args) + + resource.add_command(create) + + @click.command() + @click.argument('path') + def show(path): + print xr.load(path) + + resource.add_command(show) + + +def init_cli_connect(): + @click.command() + @click.argument('emitter') + @click.argument('receiver') + @click.option('--mapping', default=None) + def connect(mapping, receiver, emitter): + print 'Connect', emitter, receiver + emitter = xr.load(emitter) + receiver = xr.load(receiver) + print emitter + print receiver + if mapping is not None: + mapping = json.loads(mapping) + xs.connect(emitter, receiver, mapping=mapping) + + cli.add_command(connect) + + +def init_cli_connections(): + @click.group() + def connections(): + pass + + cli.add_command(connections) + + @click.command() + def show(): + print json.dumps(xs.CLIENTS, indent=2) + + connections.add_command(show) + + # TODO: this requires graphing libraries + #@click.command() + #def graph(): + # nx.draw_graphviz(xs.connection_graph()) + + #connections.add_command(graph) + + +if __name__ == '__main__': + init_cli_resource() + init_cli_connect() + init_cli_connections() + + cli() diff --git a/requirements.txt b/requirements.txt index 0330efe9..ca7d2def 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,4 @@ click==4.0 +jinja2==2.7.3 networkx==1.9.1 +PyYAML==3.11 diff --git a/x/resource.py b/x/resource.py index 391804f2..a58d7712 100644 --- a/x/resource.py +++ b/x/resource.py @@ -1,4 +1,5 @@ # -*- coding: UTF-8 -*- +import json import os import shutil @@ -24,8 +25,8 @@ class Resource(object): def __repr__(self): return "Resource('name={0}', metadata={1}, args={2}, base_dir='{3}')".format(self.name, - self.metadata, - self.args, + json.dumps(self.metadata), + json.dumps(self.args), self.base_dir) def update(self, args): diff --git a/x/signals.py b/x/signals.py index a9542fd4..85d1bb26 100644 --- a/x/signals.py +++ b/x/signals.py @@ -12,7 +12,7 @@ CLIENTS_CONFIG_KEY = 'clients-data-file' CLIENTS = utils.read_config_file(CLIENTS_CONFIG_KEY) -def guess_mappings(emitter, receiver): +def guess_mapping(emitter, receiver): """Guess connection mapping between emitter and receiver. 
Suppose emitter and receiver have inputs: @@ -49,11 +49,11 @@ def guess_mappings(emitter, receiver): return ret -def connect(emitter, receiver, mappings=None): - if mappings is None: - mappings = guess_mappings(emitter, receiver) +def connect(emitter, receiver, mapping=None): + if mapping is None: + mapping = guess_mapping(emitter, receiver) - for src, dst in mappings: + for src, dst in mapping.items(): CLIENTS.setdefault(emitter.name, {}) CLIENTS[emitter.name].setdefault(src, []) CLIENTS[emitter.name][src].append((receiver.name, dst)) From c43be7fe5f49a8a2e2194a01a1f416bd634de3e3 Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Thu, 16 Apr 2015 18:39:06 +0200 Subject: [PATCH 12/87] TODO: added deployment task --- TODO.md | 1 + 1 file changed, 1 insertion(+) diff --git a/TODO.md b/TODO.md index 3c757b15..5824e99a 100644 --- a/TODO.md +++ b/TODO.md @@ -5,6 +5,7 @@ - ansible handler (loles) - cli - config templates +- Deploy HAProxy, Keystone and MariaDB # DONE - graph is build from CLIENT dict, clients are stored in JSON file (pkaminski) From d6bbb652e6c6d322cf1aae64e79fcf5f05ded88f Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Fri, 17 Apr 2015 11:21:23 +0200 Subject: [PATCH 13/87] Disconnect functionality --- README.md | 3 +++ cli.py | 13 +++++++++++++ x/signals.py | 10 ++++++++++ 3 files changed, 26 insertions(+) diff --git a/README.md b/README.md index 4cbddf53..79327b87 100644 --- a/README.md +++ b/README.md @@ -77,4 +77,7 @@ python cli.py connect rs/mariadb_nova_data rs/node1 --mapping '{"host" : "node1. # View connections python cli.py connections show python cli.py connections graph + +# Disconnect +python cli.py disconnect rs/mariadb_nova_data rs/node1 ``` diff --git a/cli.py b/cli.py index ce803853..dab27e11 100644 --- a/cli.py +++ b/cli.py @@ -55,6 +55,19 @@ def init_cli_connect(): cli.add_command(connect) + @click.command() + @click.argument('emitter') + @click.argument('receiver') + def disconnect(receiver, emitter): + print 'Disconnect', emitter, receiver + emitter = xr.load(emitter) + receiver = xr.load(receiver) + print emitter + print receiver + xs.disconnect(emitter, receiver) + + cli.add_command(disconnect) + def init_cli_connections(): @click.group() diff --git a/x/signals.py b/x/signals.py index 85d1bb26..bb9f6210 100644 --- a/x/signals.py +++ b/x/signals.py @@ -61,6 +61,16 @@ def connect(emitter, receiver, mapping=None): utils.save_to_config_file(CLIENTS_CONFIG_KEY, CLIENTS) +def disconnect(emitter, receiver): + for src, destinations in CLIENTS[emitter.name].items(): + CLIENTS[emitter.name][src] = [ + destination for destination in destinations + if destination[0] != receiver.name + ] + + utils.save_to_config_file(CLIENTS_CONFIG_KEY, CLIENTS) + + def notify(source, key, value): CLIENTS.setdefault(source.name, []) if key in CLIENTS[source.name]: From 5d2f7339a9f34cbd88c4379fb54081ff6b3b27e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?= Date: Fri, 17 Apr 2015 09:53:17 +0000 Subject: [PATCH 14/87] Add new item to TODO --- TODO.md | 1 + 1 file changed, 1 insertion(+) diff --git a/TODO.md b/TODO.md index 5824e99a..645ec305 100644 --- a/TODO.md +++ b/TODO.md @@ -6,6 +6,7 @@ - cli - config templates - Deploy HAProxy, Keystone and MariaDB +- Handler also can requires some data, for example ansible: ip, ssh_key, ssh_user # DONE - graph is build from CLIENT dict, clients are stored in JSON file (pkaminski) From 1c47ea6ed74e04052ea0db9394b2a8cea3ac6cbb Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Fri, 17 Apr 2015 12:55:29 +0200 
Subject: [PATCH 15/87] Disconnect method added, refactoring, added CLI update --- README.md | 16 ++++++++++------ cli.py | 17 +++++++++++++++++ x/resource.py | 19 ++++++++++++------- x/signals.py | 38 +++++++++++++++----------------------- 4 files changed, 54 insertions(+), 36 deletions(-) diff --git a/README.md b/README.md index 79327b87..7c115a03 100644 --- a/README.md +++ b/README.md @@ -9,9 +9,9 @@ node1 = resource.create('node1', 'x/resources/ro_node/', 'rs/', {'ip':'10.0.0.3' node2 = resource.create('node2', 'x/resources/ro_node/', 'rs/', {'ip':'10.0.0.4', 'ssh_key' : '/vagrant/tmp/keys/ssh_private', 'ssh_user':'vagrant'}) -keystone_db_data = resource.create('mariadb_keystone_data', 'x/resources/data_container/', 'rs/', {'image' : 'mariadb', 'export_volumes' : ['/var/lib/mysql'], 'host': '', 'ssh_user': '', 'ssh_key': ''}, connections={'host' : 'node2.ip', 'ssh_key':'node2.ssh_key', 'ssh_user':'node2.ssh_user'}) +keystone_db_data = resource.create('mariadb_keystone_data', 'x/resources/data_container/', 'rs/', {'image' : 'mariadb', 'export_volumes' : ['/var/lib/mysql'], 'ip': '', 'ssh_user': '', 'ssh_key': ''}, connections={'ip' : 'node2.ip', 'ssh_key':'node2.ssh_key', 'ssh_user':'node2.ssh_user'}) -nova_db_data = resource.create('mariadb_nova_data', 'x/resources/data_container/', 'rs/', {'image' : 'mariadb', 'export_volumes' : ['/var/lib/mysql'], 'host': '', 'ssh_user': '', 'ssh_key': ''}, connections={'host' : 'node1.ip', 'ssh_key':'node1.ssh_key', 'ssh_user':'node1.ssh_user'}) +nova_db_data = resource.create('mariadb_nova_data', 'x/resources/data_container/', 'rs/', {'image' : 'mariadb', 'export_volumes' : ['/var/lib/mysql'], 'ip': '', 'ssh_user': '', 'ssh_key': ''}, connections={'ip' : 'node1.ip', 'ssh_key':'node1.ssh_key', 'ssh_user':'node1.ssh_user'}) ``` to make connection after resource is created use `signal.connect` @@ -62,17 +62,21 @@ python cli.py resource create node1 x/resources/ro_node/ rs/ '{"ip":"10.0.0.3", python cli.py resource create node2 x/resources/ro_node/ rs/ '{"ip":"10.0.0.4", "ssh_key" : "/vagrant/tmp/keys/ssh_private", "ssh_user":"vagrant"}' -python cli.py resource create mariadb_keystone_data x/resources/data_container/ rs/ '{"image": "mariadb", "export_volumes" : ["/var/lib/mysql"], "host": "", "ssh_user": "", "ssh_key": ""}' +python cli.py resource create mariadb_keystone_data x/resources/data_container/ rs/ '{"image": "mariadb", "export_volumes" : ["/var/lib/mysql"], "ip": "", "ssh_user": "", "ssh_key": ""}' -python cli.py resource create mariadb_nova_data x/resources/data_container/ rs/ '{"image" : "mariadb", "export_volumes" : ["/var/lib/mysql"], "host": "", "ssh_user": "", "ssh_key": ""}' +python cli.py resource create mariadb_nova_data x/resources/data_container/ rs/ '{"image" : "mariadb", "export_volumes" : ["/var/lib/mysql"], "ip": "", "ssh_user": "", "ssh_key": ""}' # View resources python cli.py resource show rs/mariadb_keystone_data # Connect resources -python cli.py connect rs/mariadb_keystone_data rs/node2 --mapping '{"host" : "node2.ip", "ssh_key":"node2.ssh_key", "ssh_user":"node2.ssh_user"}' +python cli.py connect rs/node2 rs/mariadb_keystone_data -python cli.py connect rs/mariadb_nova_data rs/node1 --mapping '{"host" : "node1.ip", "ssh_key":"node1.ssh_key", "ssh_user":"node1.ssh_user"}' +python cli.py connect rs/node1 rs/mariadb_nova_data + +# Test update +python cli.py update rs/node2 '{"ip": "1.1.1.1"}' +python cli.py resource show rs/mariadb_keystone_data # --> IP is 1.1.1.1 # View connections python cli.py connections show diff --git 
a/cli.py b/cli.py index dab27e11..9e35dd9b 100644 --- a/cli.py +++ b/cli.py @@ -1,6 +1,7 @@ import click import json import networkx as nx +import os from x import resource as xr from x import signals as xs @@ -37,6 +38,22 @@ def init_cli_resource(): resource.add_command(show) + @click.command() + @click.argument('path') + @click.argument('args') + def update(args, path): + print 'Update', path, args + args = json.loads(args) + # Need to load all resources for bubbling effect to take place + # TODO: resources can be scattered around, this is a simple + # situation when we assume resources are all in one directory + base_path, name = os.path.split(path) + all = xr.load_all(base_path) + r = all[name] + r.update(args) + + resource.add_command(update) + def init_cli_connect(): @click.command() diff --git a/x/resource.py b/x/resource.py index a58d7712..2dd4db51 100644 --- a/x/resource.py +++ b/x/resource.py @@ -31,11 +31,11 @@ class Resource(object): def update(self, args): for key, value in args.iteritems(): - resource_key = self.args.get(key, None) - if resource_key: - self.args[key] = value - self.changed.append(key) - signals.notify(self, key, value) + self.args[key] = value + self.changed.append(key) + signals.notify(self, key, value) + + self.save() def action(self, action): if action in self.actions: @@ -48,6 +48,12 @@ class Resource(object): if req not in args: raise Exception('Requirement `{0}` is missing in args'.format(req)) + # TODO: versioning + def save(self): + meta_file = os.path.join(self.base_dir, 'meta.yaml') + with open(meta_file, 'w') as f: + f.write(yaml.dump(self.metadata)) + def create(name, base_path, dest_path, args, connections={}): if not os.path.exists(base_path): @@ -77,8 +83,7 @@ def create(name, base_path, dest_path, args, connections={}): #save shutil.copytree(base_path, dest_path) - with open(meta_file, 'w') as f: - f.write(yaml.dump(meta)) + resource.save() db.resource_add(name, resource) return resource diff --git a/x/signals.py b/x/signals.py index bb9f6210..59ba2e09 100644 --- a/x/signals.py +++ b/x/signals.py @@ -26,34 +26,24 @@ def guess_mapping(emitter, receiver): 'ssh_user': '.ssh_user' } - If receiver accepts inputs that are not present in emitter, - error is thrown -- such cases require manual intervention. - :param emitter: :param receiver: :return: """ + guessed = {} + for key in emitter.requires: + if key in receiver.requires: + guessed[key] = '{}.{}'.format(emitter.name, key) - ret = {} - - diff = set(receiver.requires).difference(emitter.requires) - if diff: - raise Exception( - 'The following inputs are not provided by emitter: {}.' 
- 'You need to set the connection manually.'.format(diff) - ) - - for key in receiver.requires: - ret[key] = '{}.{}'.format(emitter.name, key) - - return ret + return guessed def connect(emitter, receiver, mapping=None): - if mapping is None: - mapping = guess_mapping(emitter, receiver) + mapping = mapping or {} + guessed = guess_mapping(emitter, receiver) + guessed.update(mapping) - for src, dst in mapping.items(): + for src, dst in guessed.items(): CLIENTS.setdefault(emitter.name, {}) CLIENTS[emitter.name].setdefault(src, []) CLIENTS[emitter.name][src].append((receiver.name, dst)) @@ -73,26 +63,28 @@ def disconnect(emitter, receiver): def notify(source, key, value): CLIENTS.setdefault(source.name, []) + print 'Notify', source.name, key, value, CLIENTS[source.name] if key in CLIENTS[source.name]: for client, r_key in CLIENTS[source.name][key]: resource = db.get_resource(client) + print 'Resource found', client if resource: resource.update({r_key: value}) else: - #XXX resource deleted? + print 'Resource {} deleted?'.format(client) pass -def assign_connections(reciver, connections): +def assign_connections(receiver, connections): mappings = defaultdict(list) for key, dest in connections.iteritems(): resource, r_key = dest.split('.') resource = db.get_resource(resource) value = resource.args[r_key] - reciver.args[key] = value + receiver.args[key] = value mappings[resource].append((r_key, key)) for resource, r_mappings in mappings.iteritems(): - connect(resource, reciver, r_mappings) + connect(resource, receiver, r_mappings) def connection_graph(): From f7de6b97974472c43cf819f94206461c247ba8f1 Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Fri, 17 Apr 2015 12:58:35 +0200 Subject: [PATCH 16/87] TODO updated --- TODO.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/TODO.md b/TODO.md index 5824e99a..ef8e4ded 100644 --- a/TODO.md +++ b/TODO.md @@ -1,6 +1,7 @@ # TODO -- connections are made automaticly(pkaminski) +- store all resource configurations somewhere globally (this is required to + correctly perform an update on one resource and bubble down to all others) - tags are kept in resource mata file - ansible handler (loles) - cli @@ -8,4 +9,5 @@ - Deploy HAProxy, Keystone and MariaDB # DONE +- connections are made automaticly(pkaminski) - graph is build from CLIENT dict, clients are stored in JSON file (pkaminski) From a4fe14aae919dbd02b65a4e0134cc612bb0d8bd6 Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Fri, 17 Apr 2015 13:24:30 +0200 Subject: [PATCH 17/87] Added missing main.yml, docker.yml for Astute, fixed graph outputting --- README.md | 3 +++ cli.py | 17 +++++++++++++---- docker.yml | 8 ++++++++ main.yml | 19 +++++++++++++++++++ 4 files changed, 43 insertions(+), 4 deletions(-) create mode 100644 docker.yml create mode 100644 main.yml diff --git a/README.md b/README.md index 7c115a03..96ac7285 100644 --- a/README.md +++ b/README.md @@ -80,6 +80,9 @@ python cli.py resource show rs/mariadb_keystone_data # --> IP is 1.1.1.1 # View connections python cli.py connections show + +# Outputs graph to 'graph.png' file, please note that arrows don't have "normal" pointers, but just the line is thicker +# please see http://networkx.lanl.gov/_modules/networkx/drawing/nx_pylab.html python cli.py connections graph # Disconnect diff --git a/cli.py b/cli.py index 9e35dd9b..b3432f00 100644 --- a/cli.py +++ b/cli.py @@ -1,5 +1,8 @@ import click import json +import matplotlib +matplotlib.use('Agg') # don't show windows +import matplotlib.pyplot as plt import 
networkx as nx import os @@ -100,11 +103,17 @@ def init_cli_connections(): connections.add_command(show) # TODO: this requires graphing libraries - #@click.command() - #def graph(): - # nx.draw_graphviz(xs.connection_graph()) + @click.command() + def graph(): + g = xs.connection_graph() + pos = nx.spring_layout(g) + nx.draw_networkx_nodes(g, pos) + nx.draw_networkx_edges(g, pos, arrows=True) + nx.draw_networkx_labels(g, pos) + plt.axis('off') + plt.savefig('graph.png') - #connections.add_command(graph) + connections.add_command(graph) if __name__ == '__main__': diff --git a/docker.yml b/docker.yml new file mode 100644 index 00000000..f4c70229 --- /dev/null +++ b/docker.yml @@ -0,0 +1,8 @@ +- hosts: all + sudo: yes + tasks: + - shell: docker --version + ignore_errors: true + register: docker_version + - shell: curl -sSL https://get.docker.com/ | sudo sh + when: docker_version | failed diff --git a/main.yml b/main.yml new file mode 100644 index 00000000..17fc5b8d --- /dev/null +++ b/main.yml @@ -0,0 +1,19 @@ +--- + +- hosts: all + sudo: yes + tasks: + # Setup additional development tools + - apt: name=vim state=present + - apt: name=tmux state=present + - apt: name=htop state=present + - apt: name=python-virtualenv state=present + - apt: name=virtualenvwrapper state=present + - apt: name=ipython state=present + - apt: name=python-pudb state=present + + # Graph drawing + - apt: name=python-matplotlib state=present + + # Setup development env for solar + #- shell: python setup.py develop chdir=/vagrant/solar From 9a0fdfc052e29f8a5ae3cb47a74ad068c4d1a6fe Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Fri, 17 Apr 2015 14:21:26 +0200 Subject: [PATCH 18/87] TODO updated --- TODO.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/TODO.md b/TODO.md index ef8e4ded..379df970 100644 --- a/TODO.md +++ b/TODO.md @@ -2,12 +2,12 @@ - store all resource configurations somewhere globally (this is required to correctly perform an update on one resource and bubble down to all others) -- tags are kept in resource mata file +- tags are kept in resource mata file (pkaminski) - ansible handler (loles) -- cli - config templates - Deploy HAProxy, Keystone and MariaDB # DONE - connections are made automaticly(pkaminski) - graph is build from CLIENT dict, clients are stored in JSON file (pkaminski) +- cli (pkaminski) From b24df380893693ed7e7e8d956d7b14615fbfc1be Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Fri, 17 Apr 2015 14:44:23 +0200 Subject: [PATCH 19/87] Basic tagging support --- README.md | 11 +++++++++++ cli.py | 32 ++++++++++++++++++++++++++++++-- x/resource.py | 30 +++++++++++++++++++++------- 3 files changed, 64 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 96ac7285..37a8c633 100644 --- a/README.md +++ b/README.md @@ -69,6 +69,12 @@ python cli.py resource create mariadb_nova_data x/resources/data_container/ rs/ # View resources python cli.py resource show rs/mariadb_keystone_data +# Show all resources at location rs/ +python cli.py resource show rs/ --all + +# Show resources with specific tag +python cli.py resources show rs/ --tag test + # Connect resources python cli.py connect rs/node2 rs/mariadb_keystone_data @@ -87,4 +93,9 @@ python cli.py connections graph # Disconnect python cli.py disconnect rs/mariadb_nova_data rs/node1 + +# Tag a resource: +python cli.py resource tag rs/node1 test-tag +# Remove tag +python cli.py resource tag rs/node1 test-tag --delete ``` diff
--git a/cli.py b/cli.py index b3432f00..c1ee2fc5 100644 --- a/cli.py +++ b/cli.py @@ -34,10 +34,38 @@ def init_cli_resource(): resource.add_command(create) + @click.command() + @click.argument('resource_path') + @click.argument('tag_name') + @click.option('--add/--delete', default=True) + def tag(add, tag_name, resource_path): + print 'Tag', resource_path, tag_name, add + r = xr.load(resource_path) + if add: + r.add_tag(tag_name) + else: + r.remove_tag(tag_name) + r.save() + + resource.add_command(tag) + @click.command() @click.argument('path') - def show(path): - print xr.load(path) + @click.option('--all/--one', default=False) + @click.option('--tag', default=None) + def show(tag, all, path): + if all or tag: + for name, resource in xr.load_all(path).items(): + show = True + if tag: + if tag not in resource.tags: + show = False + + if show: + print resource + print + else: + print xr.load(path) resource.add_command(show) diff --git a/x/resource.py b/x/resource.py index 2dd4db51..d574b027 100644 --- a/x/resource.py +++ b/x/resource.py @@ -13,7 +13,7 @@ from x import utils class Resource(object): - def __init__(self, name, metadata, args, base_dir): + def __init__(self, name, metadata, args, base_dir, tags=None): self.name = name self.base_dir = base_dir self.metadata = metadata @@ -22,12 +22,25 @@ class Resource(object): self._validate_args(args) self.args = args self.changed = [] + self.tags = tags or [] def __repr__(self): - return "Resource('name={0}', metadata={1}, args={2}, base_dir='{3}')".format(self.name, - json.dumps(self.metadata), - json.dumps(self.args), - self.base_dir) + return ("Resource('name={0}', metadata={1}, args={2}, " + "base_dir='{3}', tags={4})").format(self.name, + json.dumps(self.metadata), + json.dumps(self.args), + self.base_dir, + self.tags) + + def add_tag(self, tag): + if tag not in self.tags: + self.tags.append(tag) + + def remove_tag(self, tag): + try: + self.tags.remove(tag) + except ValueError: + pass def update(self, args): for key, value in args.iteritems(): @@ -50,6 +63,8 @@ class Resource(object): # TODO: versioning def save(self): + self.metadata['tags'] = self.tags + meta_file = os.path.join(self.base_dir, 'meta.yaml') with open(meta_file, 'w') as f: f.write(yaml.dump(self.metadata)) @@ -65,7 +80,6 @@ def create(name, base_path, dest_path, args, connections={}): dest_path = os.path.join(dest_path, name) base_meta_file = os.path.join(base_path, 'meta.yaml') - meta_file = os.path.join(dest_path, 'meta.yaml') actions_path = os.path.join(base_path, 'actions') meta = yaml.load(open(base_meta_file).read()) @@ -73,6 +87,7 @@ def create(name, base_path, dest_path, args, connections={}): meta['version'] = '1.0.0' meta['actions'] = {} meta['input'] = args + meta['tags'] = [] if os.path.exists(actions_path): for f in os.listdir(actions_path): @@ -93,8 +108,9 @@ def load(dest_path): meta = utils.load_file(meta_file) name = meta['id'] args = meta['input'] + tags = meta.get('tags', []) - resource = Resource(name, meta, args, dest_path) + resource = Resource(name, meta, args, dest_path, tags=tags) db.resource_add(name, resource) From c150f29dc7ef86b2025167c049e9934edde99fba Mon Sep 17 00:00:00 2001 From: Łukasz Oleś Date: Fri, 17 Apr 2015 14:32:23 +0000 Subject: [PATCH 20/87] Move handlers to separate files.
Add Ansible handler --- x/actions.py | 4 +-- x/handlers.py | 62 ------------------------------------------ x/handlers/__init__.py | 15 ++++++++++ x/handlers/ansible.py | 33 ++++++++++++++++++++++ x/handlers/base.py | 51 ++++++++++++++++++++++++++++++++++ x/handlers/shell.py | 10 +++++++ 6 files changed, 111 insertions(+), 64 deletions(-) delete mode 100644 x/handlers.py create mode 100644 x/handlers/__init__.py create mode 100644 x/handlers/ansible.py create mode 100644 x/handlers/base.py create mode 100644 x/handlers/shell.py diff --git a/x/actions.py b/x/actions.py index 93285133..2a9611bd 100644 --- a/x/actions.py +++ b/x/actions.py @@ -4,8 +4,8 @@ import handlers def resource_action(resource, action): handler = resource.metadata['handler'] - handler = handlers.get(handler) - handler().action(resource, action) + with handlers.get(handler)([resource]) as h: + h.action(resource, action) def tag_action(tag, action): #TODO diff --git a/x/handlers.py b/x/handlers.py deleted file mode 100644 index a6c17e17..00000000 --- a/x/handlers.py +++ /dev/null @@ -1,62 +0,0 @@ -# -*- coding: UTF-8 -*- -import os -import subprocess -import tempfile - -from jinja2 import Template - - -def get(handler_name): - handler = HANDLERS.get(handler_name, None) - if handler: - return handler - raise Exception('Handler {0} does not exist'.format(handler_name)) - - -class Ansible(object): - """TODO""" - def __init__(self): - pass - - def action(self, resource, action): - pass - - def _get_connection(self, resource): - return {'ssh_user': '', - 'ssh_key': '', - 'host': ''} - - def _create_inventory(self, dest_dir): - pass - - def _create_playbook(self, dest_dir): - pass - - -class Shell(object): - def __init__(self): - pass - - def action(self, resource, action): - action_file = resource.metadata['actions'][action] - action_file = os.path.join(resource.base_dir, action_file) - with open(action_file) as f: - tpl = Template(f.read()) - tpl = tpl.render(resource.args) - - tmp_file = tempfile.mkstemp(text=True)[1] - with open(tmp_file, 'w') as f: - f.write(tpl) - - subprocess.call(['bash', tmp_file]) - - -class Empty(object): - def action(self, resource, action): - pass - - -HANDLERS = {'ansible': Ansible, - 'shell': Shell, - 'none': Empty} - diff --git a/x/handlers/__init__.py b/x/handlers/__init__.py new file mode 100644 index 00000000..1fc4e43d --- /dev/null +++ b/x/handlers/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: UTF-8 -*- +from x.handlers.ansible import Ansible +from x.handlers.base import Empty +from x.handlers.shell import Shell + + +HANDLERS = {'ansible': Ansible, + 'shell': Shell, + 'none': Empty} + +def get(handler_name): + handler = HANDLERS.get(handler_name, None) + if handler: + return handler + raise Exception('Handler {0} does not exist'.format(handler_name)) diff --git a/x/handlers/ansible.py b/x/handlers/ansible.py new file mode 100644 index 00000000..d4e82513 --- /dev/null +++ b/x/handlers/ansible.py @@ -0,0 +1,33 @@ +# -*- coding: UTF-8 -*- +import os +import subprocess + +from x.handlers.base import BaseHandler + + +class Ansible(BaseHandler): + def action(self, resource, action): + inventory_file = self._create_inventory(resource) + playbook_file = self._create_playbook(resource, action) + subprocess.call(['ansible-playbook', '-i', inventory_file, playbook_file]) + + #def _get_connection(self, resource): + # return {'ssh_user': '', + # 'ssh_key': '', + # 'host': ''} + + def _create_inventory(self, r): + inventory = '{0} ansible_ssh_host={1} ansible_connection=ssh ansible_ssh_user={2} 
ansible_ssh_private_key_file={3}' + host, user, ssh_key = r.args['ip'], r.args['ssh_user'], r.args['ssh_key'] + print host + print user + print ssh_key + inventory = inventory.format(host, host, user, ssh_key) + directory = self.dirs[r.name] + inventory_path = os.path.join(directory, 'inventory') + with open(inventory_path, 'w') as inv: + inv.write(inventory) + return inventory_path + + def _create_playbook(self, resource, action): + return self._compile_action_file(resource, action) diff --git a/x/handlers/base.py b/x/handlers/base.py new file mode 100644 index 00000000..e0179582 --- /dev/null +++ b/x/handlers/base.py @@ -0,0 +1,51 @@ +# -*- coding: UTF-8 -*- +import os +import shutil +import tempfile + +from jinja2 import Template + + +class BaseHandler(object): + def __init__(self, resources): + self.dst = tempfile.mkdtemp() + self.resources = resources + + def __enter__(self): + self.dirs = {} + for resource in self.resources: + resource_dir = tempfile.mkdtemp(suffix=resource.name, dir=self.dst) + self.dirs[resource.name] = resource_dir + return self + + def __exit__(self, type, value, traceback): + print self.dst + return + shutil.rmtree(self.dst) + + def _compile_action_file(self, resource, action): + action_file = resource.metadata['actions'][action] + action_file = os.path.join(resource.base_dir, 'actions', action_file) + dir_path = self.dirs[resource.name] + dest_file = tempfile.mkstemp(text=True, prefix=action, dir=dir_path)[1] + args = self._make_args(resource) + self._compile_file(action_file, dest_file, args) + return dest_file + + def _compile_file(self, template, dest_file, args): + with open(template) as f: + tpl = Template(f.read()) + tpl = tpl.render(args) + + with open(dest_file, 'w') as f: + f.write(tpl) + + def _make_args(self, resource): + args = {'name' : resource.name} + args.update(resource.args) + return args + + +class Empty(BaseHandler): + def action(self, resource, action): + pass diff --git a/x/handlers/shell.py b/x/handlers/shell.py new file mode 100644 index 00000000..847b1498 --- /dev/null +++ b/x/handlers/shell.py @@ -0,0 +1,10 @@ +# -*- coding: UTF-8 -*- +import subprocess + +from x.handlers.base import BaseHandler + + +class Shell(BaseHandler): + def action(self, resource, action): + action_file = self._compile_action_file(resource, action) + subprocess.call(['bash', action_file]) From 23a51bb27bb4713a808786b36559039216dd61ae Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Mon, 20 Apr 2015 11:03:54 +0200 Subject: [PATCH 21/87] Fix guess_mapping mapping value --- x/signals.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x/signals.py b/x/signals.py index 59ba2e09..5ba4cb73 100644 --- a/x/signals.py +++ b/x/signals.py @@ -33,7 +33,7 @@ def guess_mapping(emitter, receiver): guessed = {} for key in emitter.requires: if key in receiver.requires: - guessed[key] = '{}.{}'.format(emitter.name, key) + guessed[key] = key return guessed From c02f57c92e1f128daeac3a7fc41f0903940b287f Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Mon, 20 Apr 2015 11:14:59 +0200 Subject: [PATCH 22/87] Disconnect receiver inputs before reconnecting to a new one --- x/actions.py | 1 + x/handlers.py | 12 ++++++------ x/signals.py | 19 +++++++++++++++++++ 3 files changed, 26 insertions(+), 6 deletions(-) diff --git a/x/actions.py b/x/actions.py index 93285133..d4eaf98b 100644 --- a/x/actions.py +++ b/x/actions.py @@ -7,6 +7,7 @@ def resource_action(resource, action): handler = handlers.get(handler) handler().action(resource, action) + def 
tag_action(tag, action): #TODO pass diff --git a/x/handlers.py b/x/handlers.py index a6c17e17..90475085 100644 --- a/x/handlers.py +++ b/x/handlers.py @@ -18,7 +18,7 @@ class Ansible(object): def __init__(self): pass - def action(self, resource, action): + def action(self, resource, action_name): pass def _get_connection(self, resource): @@ -37,22 +37,22 @@ class Shell(object): def __init__(self): pass - def action(self, resource, action): - action_file = resource.metadata['actions'][action] + def action(self, resource, action_name): + action_file = resource.metadata['actions'][action_name] action_file = os.path.join(resource.base_dir, action_file) with open(action_file) as f: tpl = Template(f.read()) tpl = tpl.render(resource.args) tmp_file = tempfile.mkstemp(text=True)[1] - with open(tmp_file, 'w') as f: - f.write(tpl) + with open(tmp_file, 'w') as g: + g.write(tpl) subprocess.call(['bash', tmp_file]) class Empty(object): - def action(self, resource, action): + def action(self, resource, action_name): pass diff --git a/x/signals.py b/x/signals.py index 5ba4cb73..e117f102 100644 --- a/x/signals.py +++ b/x/signals.py @@ -44,6 +44,10 @@ def connect(emitter, receiver, mapping=None): guessed.update(mapping) for src, dst in guessed.items(): + # disconnect all receiver inputs + # TODO: check if receiver input is of list type first + disconnect_receiver_by_input(receiver, dst) + CLIENTS.setdefault(emitter.name, {}) CLIENTS[emitter.name].setdefault(src, []) CLIENTS[emitter.name][src].append((receiver.name, dst)) @@ -61,6 +65,21 @@ def disconnect(emitter, receiver): utils.save_to_config_file(CLIENTS_CONFIG_KEY, CLIENTS) +def disconnect_receiver_by_input(receiver, input): + """Find receiver connection by input and disconnect it. + + :param receiver: + :param input: + :return: + """ + for emitter_name, inputs in CLIENTS.items(): + if input in inputs: + inputs[input] = [ + destination for destination in inputs[input] + if destination[0] != receiver.name + ] + + def notify(source, key, value): CLIENTS.setdefault(source.name, []) print 'Notify', source.name, key, value, CLIENTS[source.name] From 73e09b3b6aa16291437ba828d51507e838656bc8 Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Mon, 20 Apr 2015 11:17:44 +0200 Subject: [PATCH 23/87] TODO updated --- TODO.md | 1 + 1 file changed, 1 insertion(+) diff --git a/TODO.md b/TODO.md index 313d6cc7..8a75594f 100644 --- a/TODO.md +++ b/TODO.md @@ -3,6 +3,7 @@ - store all resource configurations somewhere globally (this is required to correctly perform an update on one resource and bubble down to all others) - tags are kept in resource mata file (pkaminski) +- add 'list' connection type (pkaminski) - ansible handler (loles) - config templates - Deploy HAProxy, Keystone and MariaDB From d6e97dab9b7ed582f3b09a168d9936cfc97bc2a7 Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Mon, 20 Apr 2015 12:25:26 +0200 Subject: [PATCH 24/87] HAProxy deployment elements - haproxy-deployment.sh script - keystone, nova, haproxy_config, haproxy resources --- README.md | 2 + haproxy-deployment.sh | 47 +++++++++++++++++++ x/resources/haproxy/actions/remove.yml | 6 +++ x/resources/haproxy/actions/run.yml | 6 +++ x/resources/haproxy/meta.yaml | 5 ++ x/resources/haproxy_config/actions/remove.yml | 6 +++ x/resources/haproxy_config/actions/run.yml | 6 +++ x/resources/haproxy_config/meta.yaml | 5 ++ x/resources/keystone/actions/remove.yml | 6 +++ x/resources/keystone/actions/run.yml | 6 +++ x/resources/keystone/meta.yaml | 5 ++ x/resources/nova/actions/remove.yml | 6 
+++ x/resources/nova/actions/run.yml | 6 +++ x/resources/nova/meta.yaml | 5 ++ 14 files changed, 117 insertions(+) create mode 100755 haproxy-deployment.sh create mode 100644 x/resources/haproxy/actions/remove.yml create mode 100644 x/resources/haproxy/actions/run.yml create mode 100644 x/resources/haproxy/meta.yaml create mode 100644 x/resources/haproxy_config/actions/remove.yml create mode 100644 x/resources/haproxy_config/actions/run.yml create mode 100644 x/resources/haproxy_config/meta.yaml create mode 100644 x/resources/keystone/actions/remove.yml create mode 100644 x/resources/keystone/actions/run.yml create mode 100644 x/resources/keystone/meta.yaml create mode 100644 x/resources/nova/actions/remove.yml create mode 100644 x/resources/nova/actions/run.yml create mode 100644 x/resources/nova/meta.yaml diff --git a/README.md b/README.md index 37a8c633..f9212223 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,5 @@ +# x + ## Usage: Creating resources: diff --git a/haproxy-deployment.sh b/haproxy-deployment.sh new file mode 100755 index 00000000..1fae686f --- /dev/null +++ b/haproxy-deployment.sh @@ -0,0 +1,47 @@ +#!/usr/bin/env bash + +# HAProxy deployment with Keystone and Nova + +cd /vagrant + +rm clients.json +rm -Rf rs/* + +# Create resources +python cli.py resource create node1 x/resources/ro_node/ rs/ '{"ip":"10.0.0.3", "ssh_key" : "/vagrant/tmp/keys/ssh_private", "ssh_user":"vagrant"}' +python cli.py resource create node2 x/resources/ro_node/ rs/ '{"ip":"10.0.0.4", "ssh_key" : "/vagrant/tmp/keys/ssh_private", "ssh_user":"vagrant"}' +python cli.py resource create node3 x/resources/ro_node/ rs/ '{"ip":"10.0.0.5", "ssh_key" : "/vagrant/tmp/keys/ssh_private", "ssh_user":"vagrant"}' +python cli.py resource create node4 x/resources/ro_node/ rs/ '{"ip":"10.0.0.6", "ssh_key" : "/vagrant/tmp/keys/ssh_private", "ssh_user":"vagrant"}' +python cli.py resource create node5 x/resources/ro_node/ rs/ '{"ip":"10.0.0.7", "ssh_key" : "/vagrant/tmp/keys/ssh_private", "ssh_user":"vagrant"}' + +python cli.py resource create mariadb_keystone1_data x/resources/data_container/ rs/ '{"image": "mariadb", "export_volumes" : ["/var/lib/mysql"], "ip": "", "ssh_user": "", "ssh_key": ""}' +python cli.py resource create mariadb_keystone2_data x/resources/data_container/ rs/ '{"image": "mariadb", "export_volumes" : ["/var/lib/mysql"], "ip": "", "ssh_user": "", "ssh_key": ""}' +python cli.py resource create keystone1 x/resources/keystone/ rs/ '{"ip": "", "ssh_user": "", "ssh_key": ""}' +python cli.py resource create keystone2 x/resources/keystone/ rs/ '{"ip": "", "ssh_user": "", "ssh_key": ""}' +python cli.py resource create haproxy_keystone_config x/resources/haproxy_config/ rs/ '{"server": "", "ssh_user": "", "ssh_key": ""}' + +python cli.py resource create mariadb_nova1_data x/resources/data_container/ rs/ '{"image" : "mariadb", "export_volumes" : ["/var/lib/mysql"], "ip": "", "ssh_user": "", "ssh_key": ""}' +python cli.py resource create mariadb_nova2_data x/resources/data_container/ rs/ '{"image" : "mariadb", "export_volumes" : ["/var/lib/mysql"], "ip": "", "ssh_user": "", "ssh_key": ""}' +python cli.py resource create nova1 x/resources/nova/ rs/ '{"server": "", "ssh_user": "", "ssh_key": ""}' +python cli.py resource create nova2 x/resources/nova/ rs/ '{"server": "", "ssh_user": "", "ssh_key": ""}' +python cli.py resource create haproxy_nova_config x/resources/haproxy_config/ rs/ '{"server": "", "ssh_user": "", "ssh_key": ""}' + +python cli.py resource create haproxy x/resources/haproxy/ rs/ '{"configs": 
[], "ssh_user": "", "ssh_key": ""}' + + +# Connect resources +python cli.py connect rs/node1 rs/mariadb_keystone1_data +python cli.py connect rs/node2 rs/mariadb_keystone2_data +python cli.py connect rs/mariadb_keystone1_data rs/keystone1 +python cli.py connect rs/mariadb_keystone2_data rs/keystone2 +python cli.py connect rs/haproxy_keystone_config rs/keystone2 --mapping '{"ip": "servers"}' + +python cli.py connect rs/node3 rs/mariadb_nova1_data +python cli.py connect rs/node4 rs/mariadb_nova2_data +python cli.py connect rs/mariadb_nova1_data rs/nova1 +python cli.py connect rs/mariadb_nova2_data rs/nova2 +python cli.py connect rs/nova2 rs/haproxy_nova_config --mapping '{"ip": "servers"}' + +python cli.py connect rs/node5 rs/haproxy +python cli.py connect rs/haproxy_keystone_config rs/haproxy --mapping '{"server": "servers"}' +python cli.py connect rs/haproxy_nova_config rs/haproxy --mapping '{"server": "servers"}' diff --git a/x/resources/haproxy/actions/remove.yml b/x/resources/haproxy/actions/remove.yml new file mode 100644 index 00000000..76142acf --- /dev/null +++ b/x/resources/haproxy/actions/remove.yml @@ -0,0 +1,6 @@ +# TODO +- hosts: [{{ ip }}] + sudo: yes + tasks: + - shell: docker stop {{ name }} + - shell: docker rm {{ name }} diff --git a/x/resources/haproxy/actions/run.yml b/x/resources/haproxy/actions/run.yml new file mode 100644 index 00000000..e223fe8f --- /dev/null +++ b/x/resources/haproxy/actions/run.yml @@ -0,0 +1,6 @@ +# TODO +- hosts: [{{ ip }}] + sudo: yes + tasks: + - shell: docker run -d --net="host" --privileged \ + --name {{ name }} {{ image }} diff --git a/x/resources/haproxy/meta.yaml b/x/resources/haproxy/meta.yaml new file mode 100644 index 00000000..568783c1 --- /dev/null +++ b/x/resources/haproxy/meta.yaml @@ -0,0 +1,5 @@ +id: haproxy +handler: ansible +version: 1.0.0 +input: + servers: diff --git a/x/resources/haproxy_config/actions/remove.yml b/x/resources/haproxy_config/actions/remove.yml new file mode 100644 index 00000000..76142acf --- /dev/null +++ b/x/resources/haproxy_config/actions/remove.yml @@ -0,0 +1,6 @@ +# TODO +- hosts: [{{ ip }}] + sudo: yes + tasks: + - shell: docker stop {{ name }} + - shell: docker rm {{ name }} diff --git a/x/resources/haproxy_config/actions/run.yml b/x/resources/haproxy_config/actions/run.yml new file mode 100644 index 00000000..e223fe8f --- /dev/null +++ b/x/resources/haproxy_config/actions/run.yml @@ -0,0 +1,6 @@ +# TODO +- hosts: [{{ ip }}] + sudo: yes + tasks: + - shell: docker run -d --net="host" --privileged \ + --name {{ name }} {{ image }} diff --git a/x/resources/haproxy_config/meta.yaml b/x/resources/haproxy_config/meta.yaml new file mode 100644 index 00000000..d6723a85 --- /dev/null +++ b/x/resources/haproxy_config/meta.yaml @@ -0,0 +1,5 @@ +id: haproxy_config +handler: ansible +version: 1.0.0 +input: + servers: diff --git a/x/resources/keystone/actions/remove.yml b/x/resources/keystone/actions/remove.yml new file mode 100644 index 00000000..76142acf --- /dev/null +++ b/x/resources/keystone/actions/remove.yml @@ -0,0 +1,6 @@ +# TODO +- hosts: [{{ ip }}] + sudo: yes + tasks: + - shell: docker stop {{ name }} + - shell: docker rm {{ name }} diff --git a/x/resources/keystone/actions/run.yml b/x/resources/keystone/actions/run.yml new file mode 100644 index 00000000..e223fe8f --- /dev/null +++ b/x/resources/keystone/actions/run.yml @@ -0,0 +1,6 @@ +# TODO +- hosts: [{{ ip }}] + sudo: yes + tasks: + - shell: docker run -d --net="host" --privileged \ + --name {{ name }} {{ image }} diff --git 
a/x/resources/keystone/meta.yaml b/x/resources/keystone/meta.yaml new file mode 100644 index 00000000..064045fa --- /dev/null +++ b/x/resources/keystone/meta.yaml @@ -0,0 +1,5 @@ +id: keystone +handler: ansible +version: 1.0.0 +input: + image: garland/docker-openstack-keystone diff --git a/x/resources/nova/actions/remove.yml b/x/resources/nova/actions/remove.yml new file mode 100644 index 00000000..76142acf --- /dev/null +++ b/x/resources/nova/actions/remove.yml @@ -0,0 +1,6 @@ +# TODO +- hosts: [{{ ip }}] + sudo: yes + tasks: + - shell: docker stop {{ name }} + - shell: docker rm {{ name }} diff --git a/x/resources/nova/actions/run.yml b/x/resources/nova/actions/run.yml new file mode 100644 index 00000000..e223fe8f --- /dev/null +++ b/x/resources/nova/actions/run.yml @@ -0,0 +1,6 @@ +# TODO +- hosts: [{{ ip }}] + sudo: yes + tasks: + - shell: docker run -d --net="host" --privileged \ + --name {{ name }} {{ image }} diff --git a/x/resources/nova/meta.yaml b/x/resources/nova/meta.yaml new file mode 100644 index 00000000..180686ed --- /dev/null +++ b/x/resources/nova/meta.yaml @@ -0,0 +1,5 @@ +id: nova +handler: ansible +version: 1.0.0 +input: + image: # TODO From c9e17474b5a76031e41cdb6a443c0412e01a2909 Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Mon, 20 Apr 2015 14:04:16 +0200 Subject: [PATCH 25/87] List input-type introduced --- haproxy-deployment.sh | 17 ++++++++++------- x/resource.py | 10 ++++++++-- x/resources/haproxy/meta.yaml | 4 +++- x/resources/haproxy_config/meta.yaml | 2 ++ x/signals.py | 26 ++++++++++++++++++++++---- 5 files changed, 45 insertions(+), 14 deletions(-) diff --git a/haproxy-deployment.sh b/haproxy-deployment.sh index 1fae686f..c4602508 100755 --- a/haproxy-deployment.sh +++ b/haproxy-deployment.sh @@ -1,7 +1,8 @@ #!/usr/bin/env bash - # HAProxy deployment with Keystone and Nova +set -e + cd /vagrant rm clients.json @@ -18,15 +19,15 @@ python cli.py resource create mariadb_keystone1_data x/resources/data_container/ python cli.py resource create mariadb_keystone2_data x/resources/data_container/ rs/ '{"image": "mariadb", "export_volumes" : ["/var/lib/mysql"], "ip": "", "ssh_user": "", "ssh_key": ""}' python cli.py resource create keystone1 x/resources/keystone/ rs/ '{"ip": "", "ssh_user": "", "ssh_key": ""}' python cli.py resource create keystone2 x/resources/keystone/ rs/ '{"ip": "", "ssh_user": "", "ssh_key": ""}' -python cli.py resource create haproxy_keystone_config x/resources/haproxy_config/ rs/ '{"server": "", "ssh_user": "", "ssh_key": ""}' +python cli.py resource create haproxy_keystone_config x/resources/haproxy_config/ rs/ '{"servers": {}, "ssh_user": "", "ssh_key": ""}' python cli.py resource create mariadb_nova1_data x/resources/data_container/ rs/ '{"image" : "mariadb", "export_volumes" : ["/var/lib/mysql"], "ip": "", "ssh_user": "", "ssh_key": ""}' python cli.py resource create mariadb_nova2_data x/resources/data_container/ rs/ '{"image" : "mariadb", "export_volumes" : ["/var/lib/mysql"], "ip": "", "ssh_user": "", "ssh_key": ""}' python cli.py resource create nova1 x/resources/nova/ rs/ '{"server": "", "ssh_user": "", "ssh_key": ""}' python cli.py resource create nova2 x/resources/nova/ rs/ '{"server": "", "ssh_user": "", "ssh_key": ""}' -python cli.py resource create haproxy_nova_config x/resources/haproxy_config/ rs/ '{"server": "", "ssh_user": "", "ssh_key": ""}' +python cli.py resource create haproxy_nova_config x/resources/haproxy_config/ rs/ '{"servers": {}, "ssh_user": "", "ssh_key": ""}' -python cli.py resource create haproxy 
x/resources/haproxy/ rs/ '{"configs": [], "ssh_user": "", "ssh_key": ""}' +python cli.py resource create haproxy x/resources/haproxy/ rs/ '{"configs": {}, "ssh_user": "", "ssh_key": ""}' # Connect resources @@ -34,14 +35,16 @@ python cli.py connect rs/node1 rs/mariadb_keystone1_data python cli.py connect rs/node2 rs/mariadb_keystone2_data python cli.py connect rs/mariadb_keystone1_data rs/keystone1 python cli.py connect rs/mariadb_keystone2_data rs/keystone2 -python cli.py connect rs/haproxy_keystone_config rs/keystone2 --mapping '{"ip": "servers"}' +python cli.py connect rs/keystone1 rs/haproxy_keystone_config --mapping '{"ip": "servers"}' +python cli.py connect rs/keystone2 rs/haproxy_keystone_config --mapping '{"ip": "servers"}' python cli.py connect rs/node3 rs/mariadb_nova1_data python cli.py connect rs/node4 rs/mariadb_nova2_data python cli.py connect rs/mariadb_nova1_data rs/nova1 python cli.py connect rs/mariadb_nova2_data rs/nova2 +python cli.py connect rs/nova1 rs/haproxy_nova_config --mapping '{"ip": "servers"}' python cli.py connect rs/nova2 rs/haproxy_nova_config --mapping '{"ip": "servers"}' python cli.py connect rs/node5 rs/haproxy -python cli.py connect rs/haproxy_keystone_config rs/haproxy --mapping '{"server": "servers"}' -python cli.py connect rs/haproxy_nova_config rs/haproxy --mapping '{"server": "servers"}' +python cli.py connect rs/haproxy_keystone_config rs/haproxy --mapping '{"server": "configs"}' +python cli.py connect rs/haproxy_nova_config rs/haproxy --mapping '{"server": "configs"}' diff --git a/x/resource.py b/x/resource.py index d574b027..ecf0c30e 100644 --- a/x/resource.py +++ b/x/resource.py @@ -21,6 +21,7 @@ class Resource(object): self.requires = metadata['input'].keys() self._validate_args(args) self.args = args + self.input_types = metadata.get('input-types', {}) self.changed = [] self.tags = tags or [] @@ -42,9 +43,14 @@ class Resource(object): except ValueError: pass - def update(self, args): + def update(self, args, emitter_name=None): for key, value in args.iteritems(): - self.args[key] = value + if self.input_types.get(key, '') == 'list': + if emitter_name is None: + raise Exception('I need to know then emitter when updating input of list type') + self.args[key][emitter_name] = value + else: + self.args[key] = value self.changed.append(key) signals.notify(self, key, value) diff --git a/x/resources/haproxy/meta.yaml b/x/resources/haproxy/meta.yaml index 568783c1..63c64ebf 100644 --- a/x/resources/haproxy/meta.yaml +++ b/x/resources/haproxy/meta.yaml @@ -2,4 +2,6 @@ id: haproxy handler: ansible version: 1.0.0 input: - servers: + configs: +input-types: + configs: list diff --git a/x/resources/haproxy_config/meta.yaml b/x/resources/haproxy_config/meta.yaml index d6723a85..0c767796 100644 --- a/x/resources/haproxy_config/meta.yaml +++ b/x/resources/haproxy_config/meta.yaml @@ -3,3 +3,5 @@ handler: ansible version: 1.0.0 input: servers: +input-types: + servers: list diff --git a/x/signals.py b/x/signals.py index e117f102..91064bea 100644 --- a/x/signals.py +++ b/x/signals.py @@ -44,9 +44,10 @@ def connect(emitter, receiver, mapping=None): guessed.update(mapping) for src, dst in guessed.items(): - # disconnect all receiver inputs - # TODO: check if receiver input is of list type first - disconnect_receiver_by_input(receiver, dst) + # Disconnect all receiver inputs + # Check if receiver input is of list type first + if receiver.input_types.get(dst, '') != 'list': + disconnect_receiver_by_input(receiver, dst) CLIENTS.setdefault(emitter.name, {}) 
CLIENTS[emitter.name].setdefault(src, []) @@ -57,11 +58,28 @@ def connect(emitter, receiver, mapping=None): def disconnect(emitter, receiver): for src, destinations in CLIENTS[emitter.name].items(): + destinations = [ + destination for destination in destinations + if destination[0] == receiver.name + ] + + for destination in destinations: + receiver_input = destination[1] + if receiver.input_types.get(receiver_input, '') == 'list': + print 'Removing input {} from {}'.format(receiver_input, receiver.name) + # TODO: update here? We're deleting an input... + receiver.args[receiver_input] = { + k: v for k, v in receiver.args.get(receiver_input, {}).items() + if k != emitter.name + } + CLIENTS[emitter.name][src] = [ destination for destination in destinations if destination[0] != receiver.name ] + # Inputs might have changed + receiver.save() utils.save_to_config_file(CLIENTS_CONFIG_KEY, CLIENTS) @@ -88,7 +106,7 @@ def notify(source, key, value): resource = db.get_resource(client) print 'Resource found', client if resource: - resource.update({r_key: value}) + resource.update({r_key: value}, emitter_name=source.name) else: print 'Resource {} deleted?'.format(client) pass From fb7b3c6980124f6593cb6b2fc75a020230b4e1cb Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Mon, 20 Apr 2015 14:15:56 +0200 Subject: [PATCH 26/87] Set emitter's values to receiver upon connection --- haproxy-deployment.sh | 4 ++-- x/signals.py | 19 +++++++++++++------ 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/haproxy-deployment.sh b/haproxy-deployment.sh index c4602508..eb434a1d 100755 --- a/haproxy-deployment.sh +++ b/haproxy-deployment.sh @@ -23,8 +23,8 @@ python cli.py resource create haproxy_keystone_config x/resources/haproxy_config python cli.py resource create mariadb_nova1_data x/resources/data_container/ rs/ '{"image" : "mariadb", "export_volumes" : ["/var/lib/mysql"], "ip": "", "ssh_user": "", "ssh_key": ""}' python cli.py resource create mariadb_nova2_data x/resources/data_container/ rs/ '{"image" : "mariadb", "export_volumes" : ["/var/lib/mysql"], "ip": "", "ssh_user": "", "ssh_key": ""}' -python cli.py resource create nova1 x/resources/nova/ rs/ '{"server": "", "ssh_user": "", "ssh_key": ""}' -python cli.py resource create nova2 x/resources/nova/ rs/ '{"server": "", "ssh_user": "", "ssh_key": ""}' +python cli.py resource create nova1 x/resources/nova/ rs/ '{"ip": "", "ssh_user": "", "ssh_key": ""}' +python cli.py resource create nova2 x/resources/nova/ rs/ '{"ip": "", "ssh_user": "", "ssh_key": ""}' python cli.py resource create haproxy_nova_config x/resources/haproxy_config/ rs/ '{"servers": {}, "ssh_user": "", "ssh_key": ""}' python cli.py resource create haproxy x/resources/haproxy/ rs/ '{"configs": {}, "ssh_user": "", "ssh_key": ""}' diff --git a/x/signals.py b/x/signals.py index 91064bea..15c0fc4b 100644 --- a/x/signals.py +++ b/x/signals.py @@ -44,6 +44,12 @@ def connect(emitter, receiver, mapping=None): guessed.update(mapping) for src, dst in guessed.items(): + # Copy emitter's values to receiver + if receiver.input_types.get(dst, '') != 'list': + receiver.args[dst] = emitter.args[src] + elif src in emitter.args: + receiver.args[dst][emitter.name] = emitter.args[src] + # Disconnect all receiver inputs # Check if receiver input is of list type first if receiver.input_types.get(dst, '') != 'list': @@ -53,6 +59,7 @@ def connect(emitter, receiver, mapping=None): CLIENTS[emitter.name].setdefault(src, []) CLIENTS[emitter.name][src].append((receiver.name, dst)) + receiver.save() 
utils.save_to_config_file(CLIENTS_CONFIG_KEY, CLIENTS) @@ -114,12 +121,12 @@ def notify(source, key, value): def assign_connections(receiver, connections): mappings = defaultdict(list) - for key, dest in connections.iteritems(): - resource, r_key = dest.split('.') - resource = db.get_resource(resource) - value = resource.args[r_key] - receiver.args[key] = value - mappings[resource].append((r_key, key)) + #for key, dest in connections.iteritems(): + # resource, r_key = dest.split('.') + # resource = db.get_resource(resource) + # value = resource.args[r_key] + # receiver.args[key] = value + # mappings[resource].append((r_key, key)) for resource, r_mappings in mappings.iteritems(): connect(resource, receiver, r_mappings) From 3d214954af1eeb19973550e663e7f546cdb0d962 Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Mon, 20 Apr 2015 17:34:36 +0200 Subject: [PATCH 27/87] Connect event bubbling --- haproxy-deployment.sh | 4 ++-- x/resource.py | 8 ++++---- x/signals.py | 14 ++++++-------- 3 files changed, 12 insertions(+), 14 deletions(-) diff --git a/haproxy-deployment.sh b/haproxy-deployment.sh index eb434a1d..81e47b7b 100755 --- a/haproxy-deployment.sh +++ b/haproxy-deployment.sh @@ -25,9 +25,9 @@ python cli.py resource create mariadb_nova1_data x/resources/data_container/ rs/ python cli.py resource create mariadb_nova2_data x/resources/data_container/ rs/ '{"image" : "mariadb", "export_volumes" : ["/var/lib/mysql"], "ip": "", "ssh_user": "", "ssh_key": ""}' python cli.py resource create nova1 x/resources/nova/ rs/ '{"ip": "", "ssh_user": "", "ssh_key": ""}' python cli.py resource create nova2 x/resources/nova/ rs/ '{"ip": "", "ssh_user": "", "ssh_key": ""}' -python cli.py resource create haproxy_nova_config x/resources/haproxy_config/ rs/ '{"servers": {}, "ssh_user": "", "ssh_key": ""}' +python cli.py resource create haproxy_nova_config x/resources/haproxy_config/ rs/ '{"ip": "", "servers": {}, "ssh_user": "", "ssh_key": ""}' -python cli.py resource create haproxy x/resources/haproxy/ rs/ '{"configs": {}, "ssh_user": "", "ssh_key": ""}' +python cli.py resource create haproxy x/resources/haproxy/ rs/ '{"ip": "", "configs": {}, "ssh_user": "", "ssh_key": ""}' # Connect resources diff --git a/x/resource.py b/x/resource.py index ecf0c30e..8644a61c 100644 --- a/x/resource.py +++ b/x/resource.py @@ -43,12 +43,12 @@ class Resource(object): except ValueError: pass - def update(self, args, emitter_name=None): + def update(self, args, emitter=None): for key, value in args.iteritems(): if self.input_types.get(key, '') == 'list': - if emitter_name is None: - raise Exception('I need to know then emitter when updating input of list type') - self.args[key][emitter_name] = value + if emitter is None: + raise Exception('I need to know the emitter when updating input of list type') + self.args[key][emitter.name] = value else: self.args[key] = value self.changed.append(key) diff --git a/x/signals.py b/x/signals.py index 15c0fc4b..ce4c516e 100644 --- a/x/signals.py +++ b/x/signals.py @@ -44,12 +44,6 @@ def connect(emitter, receiver, mapping=None): guessed.update(mapping) for src, dst in guessed.items(): - # Copy emitter's values to receiver - if receiver.input_types.get(dst, '') != 'list': - receiver.args[dst] = emitter.args[src] - elif src in emitter.args: - receiver.args[dst][emitter.name] = emitter.args[src] - # Disconnect all receiver inputs # Check if receiver input is of list type first if receiver.input_types.get(dst, '') != 'list': @@ -59,6 +53,10 @@ def connect(emitter, receiver, mapping=None): 
CLIENTS[emitter.name].setdefault(src, []) CLIENTS[emitter.name][src].append((receiver.name, dst)) + # Copy emitter's values to receiver + if src in emitter.args: + receiver.update({dst: emitter.args[src]}, emitter=emitter) + receiver.save() utils.save_to_config_file(CLIENTS_CONFIG_KEY, CLIENTS) @@ -106,14 +104,14 @@ def disconnect_receiver_by_input(receiver, input): def notify(source, key, value): - CLIENTS.setdefault(source.name, []) + CLIENTS.setdefault(source.name, {}) print 'Notify', source.name, key, value, CLIENTS[source.name] if key in CLIENTS[source.name]: for client, r_key in CLIENTS[source.name][key]: resource = db.get_resource(client) print 'Resource found', client if resource: - resource.update({r_key: value}, emitter_name=source.name) + resource.update({r_key: value}, emitter=source) else: print 'Resource {} deleted?'.format(client) pass From dbd34f320bf64afc5f7a8a79f8d6bd803c26fbd7 Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Mon, 20 Apr 2015 17:34:47 +0200 Subject: [PATCH 28/87] Connect -- provided mapping overrides guessed This way we can have multiple connections to one resource and only one of them can safely provide the ip, without others overriding it. --- x/signals.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/x/signals.py b/x/signals.py index ce4c516e..bb7045f0 100644 --- a/x/signals.py +++ b/x/signals.py @@ -39,9 +39,10 @@ def guess_mapping(emitter, receiver): def connect(emitter, receiver, mapping=None): - mapping = mapping or {} + #mapping = mapping or {} guessed = guess_mapping(emitter, receiver) - guessed.update(mapping) + #guessed.update(mapping) + mapping = mapping or guessed for src, dst in guessed.items(): # Disconnect all receiver inputs From a3c5d5a0e63671a27cd69691f0a761d529938829 Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Mon, 20 Apr 2015 17:45:57 +0200 Subject: [PATCH 29/87] One more connect 'guessed' fix --- x/signals.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x/signals.py b/x/signals.py index bb7045f0..604f9717 100644 --- a/x/signals.py +++ b/x/signals.py @@ -44,7 +44,7 @@ def connect(emitter, receiver, mapping=None): #guessed.update(mapping) mapping = mapping or guessed - for src, dst in guessed.items(): + for src, dst in mapping.items(): # Disconnect all receiver inputs # Check if receiver input is of list type first if receiver.input_types.get(dst, '') != 'list': From 6bfc7fd1c0ddede2a7cbcec1c713b647f2a6dcdb Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Mon, 20 Apr 2015 19:11:28 +0200 Subject: [PATCH 30/87] Deployment from YAML file, added tests for haproxy --- cli.py | 23 +++ haproxy_deployment/__init__.py | 1 + .../haproxy-deployment.sh | 0 haproxy_deployment/haproxy-deployment.yaml | 166 ++++++++++++++++++ haproxy_deployment/haproxy_deployment.py | 61 +++++++ x/deployment.py | 50 ++++++ 6 files changed, 301 insertions(+) create mode 100644 haproxy_deployment/__init__.py rename haproxy-deployment.sh => haproxy_deployment/haproxy-deployment.sh (100%) create mode 100755 haproxy_deployment/haproxy-deployment.yaml create mode 100644 haproxy_deployment/haproxy_deployment.py create mode 100644 x/deployment.py diff --git a/cli.py b/cli.py index c1ee2fc5..d10dcbee 100644 --- a/cli.py +++ b/cli.py @@ -6,6 +6,8 @@ import matplotlib.pyplot as plt import networkx as nx import os +from x import actions as xa +from x import deployment as xd from x import resource as xr from x import signals as xs @@ -22,6 +24,16 @@ def init_cli_resource(): cli.add_command(resource) + 
@click.command() + @click.argument('resource_path') + @click.argument('action_name') + def action(action_name, resource_path): + print 'action', resource_path, action_name + r = xr.load(resource_path) + xa.resource_action(r, action_name) + + resource.add_command(action) + @click.command() @click.argument('name') @click.argument('base_path') @@ -144,9 +156,20 @@ def init_cli_connections(): connections.add_command(graph) +def init_cli_deployment_config(): + @click.command() + @click.argument('filepath') + def deploy(filepath): + print 'Deploying from file {}'.format(filepath) + xd.deploy(filepath) + + cli.add_command(deploy) + + if __name__ == '__main__': init_cli_resource() init_cli_connect() init_cli_connections() + init_cli_deployment_config() cli() diff --git a/haproxy_deployment/__init__.py b/haproxy_deployment/__init__.py new file mode 100644 index 00000000..4bf2011a --- /dev/null +++ b/haproxy_deployment/__init__.py @@ -0,0 +1 @@ +__author__ = 'przemek' diff --git a/haproxy-deployment.sh b/haproxy_deployment/haproxy-deployment.sh similarity index 100% rename from haproxy-deployment.sh rename to haproxy_deployment/haproxy-deployment.sh diff --git a/haproxy_deployment/haproxy-deployment.yaml b/haproxy_deployment/haproxy-deployment.yaml new file mode 100755 index 00000000..55b4a7ca --- /dev/null +++ b/haproxy_deployment/haproxy-deployment.yaml @@ -0,0 +1,166 @@ +# HAProxy deployment with MariaDB, Keystone and Nova + +workdir: /vagrant +resource-save-path: rs/ +test-suite: haproxy_deployment.haproxy_deployment + +resources: + - name: node1 + model: x/resources/ro_node/ + args: + ip: 10.0.0.3 + ssh_key: /vagrant/tmp/keys/ssh_private + ssh_user: vagrant + - name: node2 + model: x/resources/ro_node/ + args: + ip: 10.0.0.4 + ssh_key: /vagrant/tmp/keys/ssh_private + ssh_user: vagrant + - name: node3 + model: x/resources/ro_node/ + args: + ip: 10.0.0.5 + ssh_key: /vagrant/tmp/keys/ssh_private + ssh_user: vagrant + - name: node4 + model: x/resources/ro_node/ + args: + ip: 10.0.0.6 + ssh_key: /vagrant/tmp/keys/ssh_private + ssh_user: vagrant + - name: node5 + model: x/resources/ro_node/ + args: + ip: 10.0.0.7 + ssh_key: /vagrant/tmp/keys/ssh_private + ssh_user: vagrant + + - name: mariadb_keystone1_data + model: x/resources/data_container/ + args: + image: mariadb + export_volumes: + - /var/lib/mysql + ip: + ssh_user: + ssh_key: + - name: mariadb_keystone2_data + model: x/resources/data_container/ + args: + image: mariadb + export_volumes: + - /var/lib/mysql + ip: + ssh_user: + ssh_key: + - name: keystone1 + model: x/resources/keystone/ + args: + ip: + ssh_user: + ssh_key: + - name: keystone2 + model: x/resources/keystone/ + args: + ip: + ssh_user: + ssh_key: + - name: haproxy_keystone_config + model: x/resources/haproxy_config/ + args: + servers: {} + ssh_user: + ssh_key: + + - name: mariadb_nova1_data + model: x/resources/data_container/ + args: + image: mariadb + export_volumes: + - /var/lib/mysql + ip: + ssh_user: + ssh_key: + - name: mariadb_nova2_data + model: x/resources/data_container/ + args: + image: mariadb + export_volumes: + - /var/lib/mysql + ip: + ssh_user: + ssh_key: + - name: nova1 + model: x/resources/nova/ + args: + ip: + ssh_user: + ssh_key: + - name: nova2 + model: x/resources/nova/ + args: + ip: + ssh_user: + ssh_key: + - name: haproxy_nova_config + model: x/resources/haproxy_config/ + args: + servers: {} + ssh_user: + ssh_key: + + - name: haproxy + model: x/resources/haproxy/ + args: + ip: + configs: {} + ssh_user: + ssh_key: + + +connections: + - emitter: node1 + receiver: 
mariadb_keystone1_data + - emitter: node2 + receiver: mariadb_keystone2_data + - emitter: mariadb_keystone1_data + receiver: keystone1 + - emitter: mariadb_keystone2_data + receiver: keystone2 + - emitter: keystone1 + receiver: haproxy_keystone_config + mapping: + ip: servers + - emitter: keystone2 + receiver: haproxy_keystone_config + mapping: + ip: servers + + - emitter: node3 + receiver: mariadb_nova1_data + - emitter: node4 + receiver: mariadb_nova2_data + - emitter: mariadb_nova1_data + receiver: nova1 + - emitter: mariadb_nova2_data + receiver: nova2 + - emitter: nova1 + receiver: haproxy_nova_config + mapping: + ip: servers + - emitter: nova2 + receiver: haproxy_nova_config + mapping: + ip: servers + + - emitter: node5 + receiver: haproxy + - emitter: haproxy_keystone_config + receiver: haproxy + mapping: + server: configs + - emitter: haproxy_nova_config + receiver: haproxy + mapping: + server: configs diff --git a/haproxy_deployment/haproxy_deployment.py b/haproxy_deployment/haproxy_deployment.py new file mode 100644 index 00000000..c7843889 --- /dev/null +++ b/haproxy_deployment/haproxy_deployment.py @@ -0,0 +1,61 @@ +import unittest + +from x import db + + +class TestHAProxyDeployment(unittest.TestCase): + def test_keystone_config(self): + node1 = db.get_resource('node1') + node2 = db.get_resource('node2') + keystone1 = db.get_resource('keystone1') + keystone2 = db.get_resource('keystone2') + + self.assertEqual(keystone1.args['ip'], node1.args['ip']) + self.assertEqual(keystone2.args['ip'], node2.args['ip']) + + def test_haproxy_keystone_config(self): + keystone1 = db.get_resource('keystone1') + keystone2 = db.get_resource('keystone2') + haproxy_keystone_config = db.get_resource('haproxy_keystone_config') + + self.assertDictEqual( + haproxy_keystone_config.args['servers'], + { + 'keystone1': keystone1.args['ip'], + 'keystone2': keystone2.args['ip'], + } + ) + + def test_nova_config(self): + node3 = db.get_resource('node3') + node4 = db.get_resource('node4') + nova1 = db.get_resource('nova1') + nova2 = db.get_resource('nova2') + + self.assertEqual(nova1.args['ip'], node3.args['ip']) + self.assertEqual(nova2.args['ip'], node4.args['ip']) + + def test_haproxy_nova_config(self): + nova1 = db.get_resource('nova1') + nova2 = db.get_resource('nova2') + haproxy_nova_config = db.get_resource('haproxy_nova_config') + + self.assertDictEqual( + haproxy_nova_config.args['servers'], + { + 'nova1': nova1.args['ip'], + 'nova2': nova2.args['ip'], + } + ) + + def test_haproxy(self): + node5 = db.get_resource('node5') + haproxy = db.get_resource('haproxy') + + self.assertEqual(node5.args['ip'], haproxy.args['ip']) + + +def main(): + loader = unittest.TestLoader() + suite = loader.loadTestsFromTestCase(TestHAProxyDeployment) + unittest.TextTestRunner().run(suite) \ No newline at end of file diff --git a/x/deployment.py b/x/deployment.py new file mode 100644 index 00000000..9cdd3d3e --- /dev/null +++ b/x/deployment.py @@ -0,0 +1,50 @@ +# Deploying stuff from YAML definition + +import imp +import os +import shutil +import yaml + +#from x import actions as xa +from x import db +from x import resource as xr +from x import signals as xs + + +def deploy(filename): + with open(filename) as f: + config = yaml.load(f) + + workdir = config['workdir'] + resource_save_path = os.path.join(workdir, config['resource-save-path']) + + # Clean stuff first + clients_file = os.path.join(workdir, 'clients.json') + if os.path.exists(clients_file): + os.remove(clients_file) + shutil.rmtree(resource_save_path, 
ignore_errors=True) + os.makedirs(resource_save_path) + + # Create resources first + for resource_definition in config['resources']: + name = resource_definition['name'] + model = os.path.join(workdir, resource_definition['model']) + args = resource_definition.get('args', {}) + print 'Creating ', name, model, resource_save_path, args + xr.create(name, model, resource_save_path, args=args) + + # Create resource connections + for connection in config['connections']: + emitter = db.get_resource(connection['emitter']) + receiver = db.get_resource(connection['receiver']) + mapping = config.get('mapping') + print 'Connecting ', emitter.name, receiver.name, mapping + xs.connect(emitter, receiver, mapping=mapping) + + # Run all tests + if 'test-suite' in config: + #test_suite_path = os.path.join(workdir, config['test-suite']) + print 'Running tests from {}'.format(config['test-suite']) + #test_suite = imp.load_source('main', test_suite_path) + test_suite = __import__(config['test-suite'], {}, {}, ['main']) + test_suite.main() From 3be4f121b397c47ae2a3262a0bfab6ac68b441b0 Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Mon, 20 Apr 2015 19:39:15 +0200 Subject: [PATCH 31/87] haproxy-deployment haproxy connection fixes --- haproxy_deployment/haproxy-deployment.yaml | 4 ++-- x/deployment.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/haproxy_deployment/haproxy-deployment.yaml b/haproxy_deployment/haproxy-deployment.yaml index 55b4a7ca..d854f501 100755 --- a/haproxy_deployment/haproxy-deployment.yaml +++ b/haproxy_deployment/haproxy-deployment.yaml @@ -159,8 +159,8 @@ connections: - emitter: haproxy_keystone_config receiver: haproxy mapping: - server: configs + servers: configs - emitter: haproxy_nova_config receiver: haproxy mapping: - server: configs + servers: configs diff --git a/x/deployment.py b/x/deployment.py index 9cdd3d3e..6614e425 100644 --- a/x/deployment.py +++ b/x/deployment.py @@ -37,7 +37,7 @@ def deploy(filename): for connection in config['connections']: emitter = db.get_resource(connection['emitter']) receiver = db.get_resource(connection['receiver']) - mapping = config.get('mapping') + mapping = connection.get('mapping') print 'Connecting ', emitter.name, receiver.name, mapping xs.connect(emitter, receiver, mapping=mapping) From d24614a788e2194a09d704e61ba9d34e896cc380 Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Mon, 20 Apr 2015 19:40:19 +0200 Subject: [PATCH 32/87] Remove unused code --- x/deployment.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/x/deployment.py b/x/deployment.py index 6614e425..2f2e3c8a 100644 --- a/x/deployment.py +++ b/x/deployment.py @@ -1,11 +1,9 @@ # Deploying stuff from YAML definition -import imp import os import shutil import yaml -#from x import actions as xa from x import db from x import resource as xr from x import signals as xs @@ -43,8 +41,6 @@ def deploy(filename): # Run all tests if 'test-suite' in config: - #test_suite_path = os.path.join(workdir, config['test-suite']) print 'Running tests from {}'.format(config['test-suite']) - #test_suite = imp.load_source('main', test_suite_path) test_suite = __import__(config['test-suite'], {}, {}, ['main']) test_suite.main() From 3cbbf457a12f354c24576ec2e8b8f5eba33dec4e Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Mon, 20 Apr 2015 19:41:28 +0200 Subject: [PATCH 33/87] Extend haproxy test case --- haproxy_deployment/haproxy_deployment.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/haproxy_deployment/haproxy_deployment.py 
b/haproxy_deployment/haproxy_deployment.py index c7843889..838a056d 100644 --- a/haproxy_deployment/haproxy_deployment.py +++ b/haproxy_deployment/haproxy_deployment.py @@ -50,9 +50,18 @@ class TestHAProxyDeployment(unittest.TestCase): def test_haproxy(self): node5 = db.get_resource('node5') + haproxy_keystone_config = db.get_resource('haproxy_keystone_config') + haproxy_nova_config = db.get_resource('haproxy_nova_config') haproxy = db.get_resource('haproxy') self.assertEqual(node5.args['ip'], haproxy.args['ip']) + self.assertItemsEqual( + haproxy.args['configs'], + { + 'haproxy_keystone_config': haproxy_keystone_config.args['servers'], + 'haproxy_nova_config': haproxy_nova_config.args['servers'], + } + ) def main(): From a9d2ca73a2b4a2092f0377458e0b33415b5a12d2 Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Tue, 21 Apr 2015 09:55:43 +0200 Subject: [PATCH 34/87] Vagrant fixes - added more machines - fixed ssh key paths (works for me with Vagrant 1.7.2) - added data_container/echo.yml ansible job to test ansible ssh --- Vagrantfile | 45 ++++++++++++++++++++- ansible.cfg | 2 + haproxy_deployment/haproxy-deployment.yaml | 10 ++--- main.yml | 3 ++ x/handlers/ansible.py | 3 ++ x/resources/data_container/actions/echo.yml | 5 +++ 6 files changed, 61 insertions(+), 7 deletions(-) create mode 100644 ansible.cfg create mode 100644 x/resources/data_container/actions/echo.yml diff --git a/Vagrantfile b/Vagrantfile index c57d5e05..61641f61 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -18,11 +18,12 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| config.vm.define "solar-dev", primary: true do |guest1| guest1.vm.provision "shell", inline: init_script, privileged: true guest1.vm.provision "file", source: "~/.vagrant.d/insecure_private_key", destination: "/vagrant/tmp/keys/ssh_private" + guest1.vm.provision "file", source: "ansible.cfg", destination: "/home/vagrant/.ansible.cfg" guest1.vm.network "private_network", ip: "10.0.0.2" guest1.vm.host_name = "solar-dev" guest1.vm.provider :virtualbox do |v| - v.customize ["modifyvm", :id, "--memory", 2048] + v.customize ["modifyvm", :id, "--memory", 256] v.name = "solar-dev" end end @@ -32,9 +33,49 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| guest2.vm.host_name = "solar-dev2" guest2.vm.provider :virtualbox do |v| - v.customize ["modifyvm", :id, "--memory", 1024] + v.customize ["modifyvm", :id, "--memory", 256] v.name = "solar-dev2" end end + config.vm.define "solar-dev3" do |guest3| + guest3.vm.network "private_network", ip: "10.0.0.4" + guest3.vm.host_name = "solar-dev3" + + guest3.vm.provider :virtualbox do |v| + v.customize ["modifyvm", :id, "--memory", 256] + v.name = "solar-dev3" + end + end + + config.vm.define "solar-dev4" do |guest4| + guest4.vm.network "private_network", ip: "10.0.0.5" + guest4.vm.host_name = "solar-dev4" + + guest4.vm.provider :virtualbox do |v| + v.customize ["modifyvm", :id, "--memory", 256] + v.name = "solar-dev4" + end + end + + config.vm.define "solar-dev5" do |guest5| + guest5.vm.network "private_network", ip: "10.0.0.6" + guest5.vm.host_name = "solar-dev5" + + guest5.vm.provider :virtualbox do |v| + v.customize ["modifyvm", :id, "--memory", 256] + v.name = "solar-dev5" + end + end + + config.vm.define "solar-dev6" do |guest6| + guest6.vm.network "private_network", ip: "10.0.0.7" + guest6.vm.host_name = "solar-dev5" + + guest6.vm.provider :virtualbox do |v| + v.customize ["modifyvm", :id, "--memory", 256] + v.name = "solar-dev6" + end + end + end diff --git a/ansible.cfg b/ansible.cfg new file
mode 100644 index 00000000..14c80651 --- /dev/null +++ b/ansible.cfg @@ -0,0 +1,2 @@ +[defaults] +host_key_checking = False diff --git a/haproxy_deployment/haproxy-deployment.yaml b/haproxy_deployment/haproxy-deployment.yaml index d854f501..4fe164dd 100755 --- a/haproxy_deployment/haproxy-deployment.yaml +++ b/haproxy_deployment/haproxy-deployment.yaml @@ -9,31 +9,31 @@ resources: model: x/resources/ro_node/ args: ip: 10.0.0.3 - ssh_key: /vagrant/tmp/keys/ssh_private + ssh_key: /vagrant/.vagrant/machines/solar-dev2/virtualbox/private_key ssh_user: vagrant - name: node2 model: x/resources/ro_node/ args: ip: 10.0.0.4 - ssh_key: /vagrant/tmp/keys/ssh_private + ssh_key: /vagrant/.vagrant/machines/solar-dev3/virtualbox/private_key ssh_user: vagrant - name: node3 model: x/resources/ro_node/ args: ip: 10.0.0.5 - ssh_key: /vagrant/tmp/keys/ssh_private + ssh_key: /vagrant/.vagrant/machines/solar-dev4/virtualbox/private_key ssh_user: vagrant - name: node4 model: x/resources/ro_node/ args: ip: 10.0.0.6 - ssh_key: /vagrant/tmp/keys/ssh_private + ssh_key: /vagrant/.vagrant/machines/solar-dev5/virtualbox/private_key ssh_user: vagrant - name: node5 model: x/resources/ro_node/ args: ip: 10.0.0.7 - ssh_key: /vagrant/tmp/keys/ssh_private + ssh_key: /vagrant/.vagrant/machines/solar-dev6/virtualbox/private_key ssh_user: vagrant - name: mariadb_keystone1_data diff --git a/main.yml b/main.yml index 17fc5b8d..e5876ba2 100644 --- a/main.yml +++ b/main.yml @@ -12,6 +12,9 @@ - apt: name=ipython state=present - apt: name=python-pudb state=present + # requirements + - shell: pip install -r /vagrant/requirements.txt + # Graph drawing - apt: name=python-matplotlib state=present diff --git a/x/handlers/ansible.py b/x/handlers/ansible.py index 881f7006..7ea9c76d 100644 --- a/x/handlers/ansible.py +++ b/x/handlers/ansible.py @@ -9,6 +9,8 @@ class Ansible(BaseHandler): def action(self, resource, action_name): inventory_file = self._create_inventory(resource) playbook_file = self._create_playbook(resource, action_name) + print 'inventory_file', inventory_file + print 'playbook_file', playbook_file subprocess.call(['ansible-playbook', '-i', inventory_file, playbook_file]) #def _get_connection(self, resource): @@ -23,6 +25,7 @@ class Ansible(BaseHandler): print user print ssh_key inventory = inventory.format(host, host, user, ssh_key) + print inventory directory = self.dirs[r.name] inventory_path = os.path.join(directory, 'inventory') with open(inventory_path, 'w') as inv: diff --git a/x/resources/data_container/actions/echo.yml b/x/resources/data_container/actions/echo.yml new file mode 100644 index 00000000..59b540ae --- /dev/null +++ b/x/resources/data_container/actions/echo.yml @@ -0,0 +1,5 @@ + +- hosts: [{{ ip }}] + sudo: yes + tasks: + - shell: echo `/sbin/ifconfig` From 06fe908b3fe7e1e0708b0f914005687c7907686d Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Tue, 21 Apr 2015 09:59:13 +0200 Subject: [PATCH 35/87] README: added info about 'deploy' command --- README.md | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/README.md b/README.md index f9212223..5f756ec7 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,20 @@ # x +## HAProxy deployment + +``` +cd /vagrant +python cli.py deploy haproxy_deployment/haproxy-deployment.yaml +``` + +or from Python shell: + +``` +from x import deployment + +deployment.deploy('/vagrant/haproxy_deployment/haproxy-deployment.yaml') +``` + ## Usage: Creating resources: From 4de8384af38f0359a19867cccf6327c516df88ac Mon Sep 17 00:00:00 2001 From: Przemyslaw 
Kaminski Date: Tue, 21 Apr 2015 11:00:49 +0200 Subject: [PATCH 36/87] Some signals/connections refactoring --- x/signals.py | 51 ++++++++++++++++++++++++++------------------------- 1 file changed, 26 insertions(+), 25 deletions(-) diff --git a/x/signals.py b/x/signals.py index 604f9717..fdb7c2ac 100644 --- a/x/signals.py +++ b/x/signals.py @@ -15,7 +15,7 @@ CLIENTS = utils.read_config_file(CLIENTS_CONFIG_KEY) def guess_mapping(emitter, receiver): """Guess connection mapping between emitter and receiver. - Suppose emitter and receiver have inputs: + Suppose emitter and receiver have common inputs: ip, ssh_key, ssh_user Then we return a connection mapping like this: @@ -39,9 +39,7 @@ def guess_mapping(emitter, receiver): def connect(emitter, receiver, mapping=None): - #mapping = mapping or {} guessed = guess_mapping(emitter, receiver) - #guessed.update(mapping) mapping = mapping or guessed for src, dst in mapping.items(): @@ -50,18 +48,22 @@ def connect(emitter, receiver, mapping=None): if receiver.input_types.get(dst, '') != 'list': disconnect_receiver_by_input(receiver, dst) - CLIENTS.setdefault(emitter.name, {}) - CLIENTS[emitter.name].setdefault(src, []) - CLIENTS[emitter.name][src].append((receiver.name, dst)) - - # Copy emitter's values to receiver - if src in emitter.args: - receiver.update({dst: emitter.args[src]}, emitter=emitter) + connect_src_dst(emitter, src, receiver, dst) receiver.save() utils.save_to_config_file(CLIENTS_CONFIG_KEY, CLIENTS) +def connect_src_dst(emitter, src, receiver, dst): + CLIENTS.setdefault(emitter.name, {}) + CLIENTS[emitter.name].setdefault(src, []) + CLIENTS[emitter.name][src].append((receiver.name, dst)) + + # Copy emitter's values to receiver + if src in emitter.args: + receiver.update({dst: emitter.args[src]}, emitter=emitter) + + def disconnect(emitter, receiver): for src, destinations in CLIENTS[emitter.name].items(): destinations = [ @@ -79,10 +81,7 @@ def disconnect(emitter, receiver): if k != emitter.name } - CLIENTS[emitter.name][src] = [ - destination for destination in destinations - if destination[0] != receiver.name - ] + disconnect_by_src(emitter, src, receiver) # Inputs might have changed receiver.save() @@ -97,11 +96,16 @@ def disconnect_receiver_by_input(receiver, input): :return: """ for emitter_name, inputs in CLIENTS.items(): - if input in inputs: - inputs[input] = [ - destination for destination in inputs[input] - if destination[0] != receiver.name - ] + emitter = db.get_resource(emitter_name) + disconnect_by_src(emitter, input, receiver) + + +def disconnect_by_src(emitter, src, receiver): + if src in CLIENTS[emitter.name]: + CLIENTS[emitter.name][src] = [ + destination for destination in CLIENTS[emitter.name][src] + if destination[0] != receiver.name + ] def notify(source, key, value): @@ -120,12 +124,9 @@ def notify(source, key, value): def assign_connections(receiver, connections): mappings = defaultdict(list) - #for key, dest in connections.iteritems(): - # resource, r_key = dest.split('.') - # resource = db.get_resource(resource) - # value = resource.args[r_key] - # receiver.args[key] = value - # mappings[resource].append((r_key, key)) + for key, dest in connections.iteritems(): + resource, r_key = dest.split('.') + mappings[resource].append((r_key, key)) for resource, r_mappings in mappings.iteritems(): connect(resource, receiver, r_mappings) From a1f3b26a7b0dfaf2c5855360c6e9c192f0414afb Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Tue, 21 Apr 2015 11:05:04 +0200 Subject: [PATCH 37/87] TODO updated --- TODO.md | 9 
++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/TODO.md b/TODO.md index 8a75594f..00fb9923 100644 --- a/TODO.md +++ b/TODO.md @@ -2,14 +2,17 @@ - store all resource configurations somewhere globally (this is required to correctly perform an update on one resource and bubble down to all others) -- tags are kept in resource mata file (pkaminski) -- add 'list' connection type (pkaminski) - ansible handler (loles) - config templates - Deploy HAProxy, Keystone and MariaDB -- Handler also can requires some data, for example ansible: ip, ssh_key, ssh_user +- Handler also can require some data, for example ansible: ip, ssh_key, ssh_user +- tag-filtered graph generation +- separate resource for docker image -- this is e.g. to make automatic image removal + when some image is unused to conserve space # DONE +- tags are kept in resource mata file (pkaminski) +- add 'list' connection type (pkaminski) - connections are made automaticly(pkaminski) - graph is build from CLIENT dict, clients are stored in JSON file (pkaminski) - cli (pkaminski) From f70a3f36f10698399d84d775071cce12fe4c2208 Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Tue, 21 Apr 2015 12:02:08 +0200 Subject: [PATCH 38/87] Fix resource.py input args so that validation is performed --- haproxy_deployment/haproxy-deployment.yaml | 53 ++++++++++++++++++---- haproxy_deployment/haproxy_deployment.py | 14 +++--- x/resource.py | 4 +- x/resources/docker_container/meta.yaml | 5 ++ x/resources/haproxy/meta.yaml | 1 + x/resources/keystone/meta.yaml | 1 + x/resources/nova/meta.yaml | 1 + 7 files changed, 60 insertions(+), 19 deletions(-) diff --git a/haproxy_deployment/haproxy-deployment.yaml b/haproxy_deployment/haproxy-deployment.yaml index 4fe164dd..bda416b5 100755 --- a/haproxy_deployment/haproxy-deployment.yaml +++ b/haproxy_deployment/haproxy-deployment.yaml @@ -58,12 +58,14 @@ resources: model: x/resources/keystone/ args: ip: + image: TEST ssh_user: ssh_key: - name: keystone2 model: x/resources/keystone/ args: ip: + image: TEST ssh_user: ssh_key: - name: haproxy_keystone_config @@ -95,12 +97,14 @@ resources: model: x/resources/nova/ args: ip: + image: TEST ssh_user: ssh_key: - name: nova2 model: x/resources/nova/ args: ip: + image: TEST ssh_user: ssh_key: - name: haproxy_nova_config @@ -110,13 +114,30 @@ resources: ssh_user: ssh_key: + #- name: haproxy-config-container + # model: x/resources/data_container/ + # args: + # ip: + # image: haproxy-config + # export_volumes: + # - haproxy-config + #- name: haproxy-config + # model: x/resources/haproxy/ + # args: + # ip: + # configs: {} + # ssh_user: + # ssh_key: - name: haproxy - model: x/resources/haproxy/ + model: x/resources/docker_container args: ip: - configs: {} + image: haproxy ssh_user: ssh_key: + host_binds: + - /etc/haproxy: /vagrant/haproxy-etc + volume_binds: connections: @@ -154,13 +175,25 @@ connections: mapping: ip: servers + # HAProxy config container + #- emitter: node5 + # receiver: haproxy-config-container + #- emitter: haproxy-config-container + # receiver: haproxy-config + #- emitter: haproxy_keystone_config + # receiver: haproxy-config + # mapping: + # servers: configs + #- emitter: haproxy_nova_config + # receiver: haproxy-config + # mapping: + # servers: configs + + # HAProxy service - emitter: node5 receiver: haproxy - - emitter: haproxy_keystone_config - receiver: haproxy - mapping: - servers: configs - - emitter: haproxy_nova_config - receiver: haproxy - mapping: - servers: configs + + #- emitter: haproxy-config + # receiver: haproxy + # 
mapping: + diff --git a/haproxy_deployment/haproxy_deployment.py b/haproxy_deployment/haproxy_deployment.py index 838a056d..606bf1c3 100644 --- a/haproxy_deployment/haproxy_deployment.py +++ b/haproxy_deployment/haproxy_deployment.py @@ -55,13 +55,13 @@ class TestHAProxyDeployment(unittest.TestCase): haproxy = db.get_resource('haproxy') self.assertEqual(node5.args['ip'], haproxy.args['ip']) - self.assertItemsEqual( - haproxy.args['configs'], - { - 'haproxy_keystone_config': haproxy_keystone_config.args['servers'], - 'haproxy_nova_config': haproxy_nova_config.args['servers'], - } - ) + #self.assertItemsEqual( + # haproxy.args['configs'], + # { + # 'haproxy_keystone_config': haproxy_keystone_config.args['servers'], + # 'haproxy_nova_config': haproxy_nova_config.args['servers'], + # } + #) def main(): diff --git a/x/resource.py b/x/resource.py index 8644a61c..4500e4c3 100644 --- a/x/resource.py +++ b/x/resource.py @@ -21,6 +21,7 @@ class Resource(object): self.requires = metadata['input'].keys() self._validate_args(args) self.args = args + self.metadata['input'] = args self.input_types = metadata.get('input-types', {}) self.changed = [] self.tags = tags or [] @@ -92,7 +93,6 @@ def create(name, base_path, dest_path, args, connections={}): meta['id'] = name meta['version'] = '1.0.0' meta['actions'] = {} - meta['input'] = args meta['tags'] = [] if os.path.exists(actions_path): @@ -102,7 +102,7 @@ def create(name, base_path, dest_path, args, connections={}): resource = Resource(name, meta, args, dest_path) signals.assign_connections(resource, connections) - #save + # save shutil.copytree(base_path, dest_path) resource.save() db.resource_add(name, resource) diff --git a/x/resources/docker_container/meta.yaml b/x/resources/docker_container/meta.yaml index 4170f334..ad46d110 100644 --- a/x/resources/docker_container/meta.yaml +++ b/x/resources/docker_container/meta.yaml @@ -2,5 +2,10 @@ id: container handler: ansible version: 1.0.0 input: + ip: image: + host_binds: volume_binds: +input-types: + host_binds: list + volume_binds: list diff --git a/x/resources/haproxy/meta.yaml b/x/resources/haproxy/meta.yaml index 63c64ebf..136a855d 100644 --- a/x/resources/haproxy/meta.yaml +++ b/x/resources/haproxy/meta.yaml @@ -2,6 +2,7 @@ id: haproxy handler: ansible version: 1.0.0 input: + ip: configs: input-types: configs: list diff --git a/x/resources/keystone/meta.yaml b/x/resources/keystone/meta.yaml index 064045fa..80bcb326 100644 --- a/x/resources/keystone/meta.yaml +++ b/x/resources/keystone/meta.yaml @@ -2,4 +2,5 @@ id: keystone handler: ansible version: 1.0.0 input: + ip: image: garland/docker-openstack-keystone diff --git a/x/resources/nova/meta.yaml b/x/resources/nova/meta.yaml index 180686ed..e6a861c0 100644 --- a/x/resources/nova/meta.yaml +++ b/x/resources/nova/meta.yaml @@ -2,4 +2,5 @@ id: nova handler: ansible version: 1.0.0 input: + ip: image: # TODO From f64b8f88ef483fb9d0380cb94c3a97a9561f91ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?= Date: Tue, 21 Apr 2015 11:49:49 +0000 Subject: [PATCH 39/87] Mariadb resource --- x/resources/docker/docker.yml | 10 ---------- x/resources/mariadb/actions/remove.yml | 8 +++++--- x/resources/mariadb/actions/run.yml | 12 +++++++++--- x/resources/mariadb/meta.yaml | 8 ++++++-- 4 files changed, 20 insertions(+), 18 deletions(-) delete mode 100644 x/resources/docker/docker.yml diff --git a/x/resources/docker/docker.yml b/x/resources/docker/docker.yml deleted file mode 100644 index 3b704056..00000000 --- a/x/resources/docker/docker.yml +++ 
/dev/null @@ -1,10 +0,0 @@ -id: docker -type: resource -handler: ansible -version: v1 -actions: - run: simple/docker/run.yml - remove: simple/docker/remove.yml -input: - base_image: ubuntu -tags: [n/1] diff --git a/x/resources/mariadb/actions/remove.yml b/x/resources/mariadb/actions/remove.yml index d3c3149f..cb1bc73b 100644 --- a/x/resources/mariadb/actions/remove.yml +++ b/x/resources/mariadb/actions/remove.yml @@ -1,6 +1,8 @@ - - hosts: [{{ ip }}] sudo: yes tasks: - - shell: docker stop {{ name }} - - shell: docker rm {{ name }} + - name: mariadb container + docker: + name: {{ name }} + image: {{ image }} + state: absent diff --git a/x/resources/mariadb/actions/run.yml b/x/resources/mariadb/actions/run.yml index 90ae50dc..b1e9d87f 100644 --- a/x/resources/mariadb/actions/run.yml +++ b/x/resources/mariadb/actions/run.yml @@ -1,6 +1,12 @@ - - hosts: [{{ ip }}] sudo: yes tasks: - - shell: docker run -d --net="host" --privileged \ - --name {{ name }} {{ image }} + - name: mariadb container + docker: + name: {{ name }} + image: {{ image }} + state: running + ports: + - {{ port }}:3306 + env: + MYSQL_ROOT_PASSWORD: {{ root_password }} diff --git a/x/resources/mariadb/meta.yaml b/x/resources/mariadb/meta.yaml index 8fcdb87c..6fa200d0 100644 --- a/x/resources/mariadb/meta.yaml +++ b/x/resources/mariadb/meta.yaml @@ -2,5 +2,9 @@ id: mariadb handler: ansible version: 1.0.0 input: - image: tutum/mariadq -tags: [n/1] + image: + root_password: + port: + ip: + ssh_key: + ssh_user: From 3fdb11f0936c95270b71e3d62bee092a699e8a44 Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Tue, 21 Apr 2015 14:51:39 +0200 Subject: [PATCH 40/87] First working version of haproxy Docker --- Vagrantfile | 7 +++++- haproxy.cfg | 26 ++++++++++++++++++++ haproxy_deployment/haproxy-deployment.yaml | 4 +-- haproxy_deployment/haproxy_deployment.py | 2 ++ x/resources/docker_container/actions/run.yml | 17 +++++++++++-- x/resources/docker_container/meta.yaml | 2 ++ x/resources/haproxy/meta.yaml | 2 ++ 7 files changed, 55 insertions(+), 5 deletions(-) create mode 100644 haproxy.cfg diff --git a/Vagrantfile b/Vagrantfile index 61641f61..bf248c30 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -29,6 +29,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| end config.vm.define "solar-dev2" do |guest2| + guest2.vm.provision "shell", inline: init_script, privileged: true guest2.vm.network "private_network", ip: "10.0.0.3" guest2.vm.host_name = "solar-dev2" @@ -39,6 +40,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| end config.vm.define "solar-dev3" do |guest3| + guest3.vm.provision "shell", inline: init_script, privileged: true guest3.vm.network "private_network", ip: "10.0.0.4" guest3.vm.host_name = "solar-dev3" @@ -49,6 +51,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| end config.vm.define "solar-dev4" do |guest4| + guest4.vm.provision "shell", inline: init_script, privileged: true guest4.vm.network "private_network", ip: "10.0.0.5" guest4.vm.host_name = "solar-dev4" @@ -59,6 +62,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| end config.vm.define "solar-dev5" do |guest5| + guest5.vm.provision "shell", inline: init_script, privileged: true guest5.vm.network "private_network", ip: "10.0.0.6" guest5.vm.host_name = "solar-dev5" @@ -69,8 +73,9 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| end config.vm.define "solar-dev6" do |guest6| + guest6.vm.provision "shell", inline: init_script, privileged: true guest6.vm.network "private_network", ip: "10.0.0.7" - guest6.vm.host_name 
= "solar-dev5" + guest6.vm.host_name = "solar-dev6" guest6.vm.provider :virtualbox do |v| v.customize ["modifyvm", :id, "--memory", 256] diff --git a/haproxy.cfg b/haproxy.cfg new file mode 100644 index 00000000..736a5185 --- /dev/null +++ b/haproxy.cfg @@ -0,0 +1,26 @@ +global + log 127.0.0.1 local0 + log 127.0.0.1 local1 notice + maxconn 4096 + tune.ssl.default-dh-param 2048 + pidfile /var/run/haproxy.pid + user haproxy + group haproxy + daemon + stats socket /var/run/haproxy.stats level admin + ssl-default-bind-options no-sslv3 +defaults + log global + mode http + option redispatch + option httplog + option dontlognull + option forwardfor + timeout connect 5000 + timeout client 50000 + timeout server 50000 +frontend default_frontend + bind 0.0.0.0:80 + default_backend default_service +backend default_service + balance roundrobin diff --git a/haproxy_deployment/haproxy-deployment.yaml b/haproxy_deployment/haproxy-deployment.yaml index bda416b5..e90a2103 100755 --- a/haproxy_deployment/haproxy-deployment.yaml +++ b/haproxy_deployment/haproxy-deployment.yaml @@ -132,11 +132,11 @@ resources: model: x/resources/docker_container args: ip: - image: haproxy + image: tutum/haproxy ssh_user: ssh_key: host_binds: - - /etc/haproxy: /vagrant/haproxy-etc + - /etc/haproxy: /etc/haproxy volume_binds: diff --git a/haproxy_deployment/haproxy_deployment.py b/haproxy_deployment/haproxy_deployment.py index 606bf1c3..4cc641dd 100644 --- a/haproxy_deployment/haproxy_deployment.py +++ b/haproxy_deployment/haproxy_deployment.py @@ -55,6 +55,8 @@ class TestHAProxyDeployment(unittest.TestCase): haproxy = db.get_resource('haproxy') self.assertEqual(node5.args['ip'], haproxy.args['ip']) + self.assertEqual(node5.args['ssh_key'], haproxy.args['ssh_key']) + self.assertEqual(node5.args['ssh_user'], haproxy.args['ssh_user']) #self.assertItemsEqual( # haproxy.args['configs'], # { diff --git a/x/resources/docker_container/actions/run.yml b/x/resources/docker_container/actions/run.yml index 90ae50dc..71af55f2 100644 --- a/x/resources/docker_container/actions/run.yml +++ b/x/resources/docker_container/actions/run.yml @@ -2,5 +2,18 @@ - hosts: [{{ ip }}] sudo: yes tasks: - - shell: docker run -d --net="host" --privileged \ - --name {{ name }} {{ image }} + - apt: name=python-pip state=present + - shell: pip install docker-py + - service: name=docker state=started + - file: path=/etc/haproxy/haproxy.cfg state=touch + - template: src=/vagrant/haproxy.cfg dest=/etc/haproxy/haproxy.cfg + - docker: + name: {{ name }} + image: {{ image }} + state: running + volumes: + {% for bind in host_binds %} + {% for src, dst in bind.items () %} + - {{ src }}:{{ dst }} + {% endfor %} + {% endfor %} diff --git a/x/resources/docker_container/meta.yaml b/x/resources/docker_container/meta.yaml index ad46d110..8a104815 100644 --- a/x/resources/docker_container/meta.yaml +++ b/x/resources/docker_container/meta.yaml @@ -6,6 +6,8 @@ input: image: host_binds: volume_binds: + ssh_user: + ssh_key: input-types: host_binds: list volume_binds: list diff --git a/x/resources/haproxy/meta.yaml b/x/resources/haproxy/meta.yaml index 136a855d..95d8773f 100644 --- a/x/resources/haproxy/meta.yaml +++ b/x/resources/haproxy/meta.yaml @@ -4,5 +4,7 @@ version: 1.0.0 input: ip: configs: + ssh_user: + ssh_key: input-types: configs: list From 98e5b3375be8f59c166719fdc905033448bac396 Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Tue, 21 Apr 2015 20:15:09 +0200 Subject: [PATCH 41/87] Successful HAProxy deployment with config file - Some unit tests added --- 
haproxy.cfg | 30 +++- haproxy_deployment/haproxy-deployment.yaml | 59 +++++--- haproxy_deployment/haproxy_deployment.py | 42 +++++- x/db.py | 6 + x/deployment.py | 5 +- x/handlers/ansible.py | 5 +- x/resource.py | 11 +- x/resources/docker_container/actions/run.yml | 12 +- x/resources/haproxy/actions/run.yml | 20 ++- x/resources/haproxy/meta.yaml | 3 + x/resources/haproxy_config/meta.yaml | 2 + x/resources/keystone/meta.yaml | 1 + x/resources/nova/meta.yaml | 1 + x/signals.py | 11 ++ x/test/__init__.py | 1 + x/test/test_signals.py | 146 +++++++++++++++++++ 16 files changed, 308 insertions(+), 47 deletions(-) create mode 100644 x/test/__init__.py create mode 100644 x/test/test_signals.py diff --git a/haproxy.cfg b/haproxy.cfg index 736a5185..93401c17 100644 --- a/haproxy.cfg +++ b/haproxy.cfg @@ -9,6 +9,7 @@ global daemon stats socket /var/run/haproxy.stats level admin ssl-default-bind-options no-sslv3 + defaults log global mode http @@ -19,8 +20,27 @@ defaults timeout connect 5000 timeout client 50000 timeout server 50000 -frontend default_frontend - bind 0.0.0.0:80 - default_backend default_service -backend default_service - balance roundrobin + +#frontend default_frontend +# bind 0.0.0.0:80 +# default_backend default_service + +#backend default_service +# balance roundrobin + +{% for service in haproxy_services %} +listen {{ service['name'] }} 0.0.0.0:{{ service['servers'][0]['port'] }} + mode http + stats enable + stats uri /haproxy?stats + stats realm Strictly\ Private + stats auth A_Username:YourPassword + stats auth Another_User:passwd + balance roundrobin + option httpclose + option forwardfor + {% for server in service['servers'] %} + server {{ server['name'] }} {{ server['ip'] }}:{{ server['port'] }} check + {% endfor %} + +{% endfor %} diff --git a/haproxy_deployment/haproxy-deployment.yaml b/haproxy_deployment/haproxy-deployment.yaml index e90a2103..d755d17a 100755 --- a/haproxy_deployment/haproxy-deployment.yaml +++ b/haproxy_deployment/haproxy-deployment.yaml @@ -72,6 +72,7 @@ resources: model: x/resources/haproxy_config/ args: servers: {} + ports: {} ssh_user: ssh_key: @@ -111,6 +112,7 @@ resources: model: x/resources/haproxy_config/ args: servers: {} + ports: {} ssh_user: ssh_key: @@ -121,13 +123,14 @@ resources: # image: haproxy-config # export_volumes: # - haproxy-config - #- name: haproxy-config - # model: x/resources/haproxy/ - # args: - # ip: - # configs: {} - # ssh_user: - # ssh_key: + - name: haproxy-config + model: x/resources/haproxy/ + args: + ip: + configs: {} + configs_ports: {} + ssh_user: + ssh_key: - name: haproxy model: x/resources/docker_container args: @@ -135,9 +138,8 @@ resources: image: tutum/haproxy ssh_user: ssh_key: - host_binds: - - /etc/haproxy: /etc/haproxy - volume_binds: + host_binds: {} + volume_binds: {} connections: @@ -153,10 +155,12 @@ connections: receiver: haproxy_keystone_config mapping: ip: servers + port: ports - emitter: keystone2 receiver: haproxy_keystone_config mapping: ip: servers + port: ports - emitter: node3 receiver: mariadb_nova1_data @@ -170,28 +174,43 @@ connections: receiver: haproxy_nova_config mapping: ip: servers + port: ports - emitter: nova2 receiver: haproxy_nova_config mapping: ip: servers + port: ports # HAProxy config container + - emitter: node5 + receiver: haproxy-config + #- emitter: node5 # receiver: haproxy-config-container #- emitter: haproxy-config-container # receiver: haproxy-config - #- emitter: haproxy_keystone_config - # receiver: haproxy-config - # mapping: - # servers: configs - #- emitter: 
haproxy_nova_config - # receiver: haproxy-config - # mapping: - # servers: configs + - emitter: haproxy_keystone_config + receiver: haproxy-config + mapping: + ports: configs_ports + servers: configs + - emitter: haproxy_nova_config + receiver: haproxy-config + mapping: + ports: configs_ports + servers: configs + + - emitter: haproxy-config + receiver: haproxy + mapping: + ip: ip + ssh_user: ssh_user + ssh_key: ssh_key + config_dir: host_binds # HAProxy service - - emitter: node5 - receiver: haproxy + #- emitter: node5 + # receiver: haproxy #- emitter: haproxy-config # receiver: haproxy diff --git a/haproxy_deployment/haproxy_deployment.py b/haproxy_deployment/haproxy_deployment.py index 4cc641dd..d88e11ba 100644 --- a/haproxy_deployment/haproxy_deployment.py +++ b/haproxy_deployment/haproxy_deployment.py @@ -25,6 +25,13 @@ class TestHAProxyDeployment(unittest.TestCase): 'keystone2': keystone2.args['ip'], } ) + self.assertDictEqual( + haproxy_keystone_config.args['ports'], + { + 'keystone1': keystone1.args['port'], + 'keystone2': keystone2.args['port'], + } + ) def test_nova_config(self): node3 = db.get_resource('node3') @@ -47,23 +54,44 @@ class TestHAProxyDeployment(unittest.TestCase): 'nova2': nova2.args['ip'], } ) + self.assertDictEqual( + haproxy_nova_config.args['ports'], + { + 'nova1': nova1.args['port'], + 'nova2': nova2.args['port'], + } + ) def test_haproxy(self): node5 = db.get_resource('node5') haproxy_keystone_config = db.get_resource('haproxy_keystone_config') haproxy_nova_config = db.get_resource('haproxy_nova_config') haproxy = db.get_resource('haproxy') + haproxy_config = db.get_resource('haproxy-config') self.assertEqual(node5.args['ip'], haproxy.args['ip']) self.assertEqual(node5.args['ssh_key'], haproxy.args['ssh_key']) self.assertEqual(node5.args['ssh_user'], haproxy.args['ssh_user']) - #self.assertItemsEqual( - # haproxy.args['configs'], - # { - # 'haproxy_keystone_config': haproxy_keystone_config.args['servers'], - # 'haproxy_nova_config': haproxy_nova_config.args['servers'], - # } - #) + self.assertItemsEqual( + haproxy_config.args['configs'], + { + 'haproxy_keystone_config': haproxy_keystone_config.args['servers'], + 'haproxy_nova_config': haproxy_nova_config.args['servers'], + } + ) + self.assertItemsEqual( + haproxy_config.args['configs_ports'], + { + 'haproxy_keystone_config': haproxy_keystone_config.args['ports'], + 'haproxy_nova_config': haproxy_nova_config.args['ports'], + } + ) + self.assertItemsEqual( + { + 'haproxy-config': haproxy_config.args['config_dir'], + }, + haproxy.args['host_binds'] + ) def main(): diff --git a/x/db.py b/x/db.py index 90df6dfe..4ffcc490 100644 --- a/x/db.py +++ b/x/db.py @@ -11,3 +11,9 @@ def resource_add(key, value): def get_resource(key): return RESOURCE_DB.get(key, None) + + +def clear(): + global RESOURCE_DB + + RESOURCE_DB = {} diff --git a/x/deployment.py b/x/deployment.py index 2f2e3c8a..96bda097 100644 --- a/x/deployment.py +++ b/x/deployment.py @@ -17,9 +17,8 @@ def deploy(filename): resource_save_path = os.path.join(workdir, config['resource-save-path']) # Clean stuff first - clients_file = os.path.join(workdir, 'clients.json') - if os.path.exists(clients_file): - os.remove(clients_file) + db.clear() + xs.clear() shutil.rmtree(resource_save_path, ignore_errors=True) os.makedirs(resource_save_path) diff --git a/x/handlers/ansible.py b/x/handlers/ansible.py index 7ea9c76d..ff9640ef 100644 --- a/x/handlers/ansible.py +++ b/x/handlers/ansible.py @@ -1,6 +1,7 @@ # -*- coding: UTF-8 -*- import os import subprocess +import yaml 
from x.handlers.base import BaseHandler @@ -11,7 +12,9 @@ class Ansible(BaseHandler): playbook_file = self._create_playbook(resource, action_name) print 'inventory_file', inventory_file print 'playbook_file', playbook_file - subprocess.call(['ansible-playbook', '-i', inventory_file, playbook_file]) + call_args = ['ansible-playbook', '-i', inventory_file, playbook_file] + print 'EXECUTING: ', ' '.join(call_args) + subprocess.call(call_args) #def _get_connection(self, resource): # return {'ssh_user': '', diff --git a/x/resource.py b/x/resource.py index 4500e4c3..895105d1 100644 --- a/x/resource.py +++ b/x/resource.py @@ -19,7 +19,7 @@ class Resource(object): self.metadata = metadata self.actions = metadata['actions'].keys() if metadata['actions'] else None self.requires = metadata['input'].keys() - self._validate_args(args) + self._validate_args(args, metadata['input']) self.args = args self.metadata['input'] = args self.input_types = metadata.get('input-types', {}) @@ -63,10 +63,15 @@ class Resource(object): else: raise Exception('Uuups, action is not available') - def _validate_args(self, args): + def _validate_args(self, args, inputs): for req in self.requires: if req not in args: - raise Exception('Requirement `{0}` is missing in args'.format(req)) + # If metadata input is filled with a value, use it as default + # and don't report an error + if inputs.get(req): + args[req] = inputs[req] + else: + raise Exception('Requirement `{0}` is missing in args'.format(req)) # TODO: versioning def save(self): diff --git a/x/resources/docker_container/actions/run.yml b/x/resources/docker_container/actions/run.yml index 71af55f2..e02c6e39 100644 --- a/x/resources/docker_container/actions/run.yml +++ b/x/resources/docker_container/actions/run.yml @@ -5,15 +5,15 @@ - apt: name=python-pip state=present - shell: pip install docker-py - service: name=docker state=started - - file: path=/etc/haproxy/haproxy.cfg state=touch - - template: src=/vagrant/haproxy.cfg dest=/etc/haproxy/haproxy.cfg - docker: name: {{ name }} image: {{ image }} state: running volumes: - {% for bind in host_binds %} - {% for src, dst in bind.items () %} - - {{ src }}:{{ dst }} - {% endfor %} + # TODO: host_binds might need more work + # Currently it's not that trivial to pass custom src: dst here + # (when a config variable is passed here from other resource) + # so we mount it to the same directory as on host + {% for emitter, bind in host_binds.items() %} + - {{ bind }}:{{ bind }} {% endfor %} diff --git a/x/resources/haproxy/actions/run.yml b/x/resources/haproxy/actions/run.yml index e223fe8f..56dbdac8 100644 --- a/x/resources/haproxy/actions/run.yml +++ b/x/resources/haproxy/actions/run.yml @@ -1,6 +1,22 @@ # TODO - hosts: [{{ ip }}] sudo: yes + vars: + config_dir: {{ config_dir }} + haproxy_ip: {{ ip }} + haproxy_services: + {% for service, servers in configs.items() %} + - name: {{ service }} + servers: + {% for name, ip in servers.items() %} + - name: {{ name }} + ip: {{ ip }} + port: {{ configs_ports[service][name] }} + {% endfor %} + {% endfor %} tasks: - - shell: docker run -d --net="host" --privileged \ - --name {{ name }} {{ image }} + - apt: name=python-pip state=present + - shell: pip install docker-py + - service: name=docker state=started + - file: path=/etc/haproxy/haproxy.cfg state=touch + - template: src=/vagrant/haproxy.cfg dest=/etc/haproxy/haproxy.cfg diff --git a/x/resources/haproxy/meta.yaml b/x/resources/haproxy/meta.yaml index 95d8773f..5459ee0f 100644 --- a/x/resources/haproxy/meta.yaml +++ 
b/x/resources/haproxy/meta.yaml @@ -3,8 +3,11 @@ handler: ansible version: 1.0.0 input: ip: + config_dir: /etc/haproxy configs: + configs_ports: ssh_user: ssh_key: input-types: configs: list + configs_ports: list diff --git a/x/resources/haproxy_config/meta.yaml b/x/resources/haproxy_config/meta.yaml index 0c767796..48b2d171 100644 --- a/x/resources/haproxy_config/meta.yaml +++ b/x/resources/haproxy_config/meta.yaml @@ -2,6 +2,8 @@ id: haproxy_config handler: ansible version: 1.0.0 input: + ports: servers: input-types: + ports: list servers: list diff --git a/x/resources/keystone/meta.yaml b/x/resources/keystone/meta.yaml index 80bcb326..404e2c7e 100644 --- a/x/resources/keystone/meta.yaml +++ b/x/resources/keystone/meta.yaml @@ -3,4 +3,5 @@ handler: ansible version: 1.0.0 input: ip: + port: 5000 image: garland/docker-openstack-keystone diff --git a/x/resources/nova/meta.yaml b/x/resources/nova/meta.yaml index e6a861c0..0591a410 100644 --- a/x/resources/nova/meta.yaml +++ b/x/resources/nova/meta.yaml @@ -3,4 +3,5 @@ handler: ansible version: 1.0.0 input: ip: + port: 8774 image: # TODO diff --git a/x/signals.py b/x/signals.py index fdb7c2ac..e5827962 100644 --- a/x/signals.py +++ b/x/signals.py @@ -2,6 +2,7 @@ from collections import defaultdict import itertools import networkx as nx +import os import db @@ -12,6 +13,16 @@ CLIENTS_CONFIG_KEY = 'clients-data-file' CLIENTS = utils.read_config_file(CLIENTS_CONFIG_KEY) +def clear(): + global CLIENTS + + CLIENTS = {} + + path = utils.read_config()[CLIENTS_CONFIG_KEY] + if os.path.exists(path): + os.remove(path) + + def guess_mapping(emitter, receiver): """Guess connection mapping between emitter and receiver. diff --git a/x/test/__init__.py b/x/test/__init__.py new file mode 100644 index 00000000..4bf2011a --- /dev/null +++ b/x/test/__init__.py @@ -0,0 +1 @@ +__author__ = 'przemek' diff --git a/x/test/test_signals.py b/x/test/test_signals.py new file mode 100644 index 00000000..acf91a85 --- /dev/null +++ b/x/test/test_signals.py @@ -0,0 +1,146 @@ +import os +import shutil +import tempfile +import unittest +import yaml + +from x import db +from x import resource as xr +from x import signals as xs + + +class TestListInput(unittest.TestCase): + def setUp(self): + self.storage_dir = tempfile.mkdtemp() + + def tearDown(self): + shutil.rmtree(self.storage_dir) + db.clear() + xs.clear() + + def make_resource_meta(self, meta_yaml): + meta = yaml.load(meta_yaml) + + path = os.path.join(self.storage_dir, meta['id']) + os.makedirs(path) + with open(os.path.join(path, 'meta.yaml'), 'w') as f: + f.write(meta_yaml) + + return path + + def create_resource(self, name, src, args): + dst = os.path.join(self.storage_dir, 'rs', name) + os.makedirs(dst) + + return xr.create(name, src, dst, args) + + def test_list_input_single(self): + sample_meta_dir = self.make_resource_meta(""" +id: sample +handler: ansible +version: 1.0.0 +input: + ip: + """) + list_input_single_meta_dir = self.make_resource_meta(""" +id: list-input-single +handler: ansible +version: 1.0.0 +input: + ips: +input-types: + ips: list + """) + + sample1 = self.create_resource( + 'sample1', sample_meta_dir, {'ip': '10.0.0.1'} + ) + sample2 = self.create_resource( + 'sample2', sample_meta_dir, {'ip': '10.0.0.2'} + ) + list_input_single = self.create_resource( + 'list-input-single', list_input_single_meta_dir, {'ips': {}} + ) + + xs.connect(sample1, list_input_single, mapping={'ip': 'ips'}) + self.assertItemsEqual( + list_input_single.args['ips'], + { + 'sample1': sample1.args['ip'], + } + ) + + 
xs.connect(sample2, list_input_single, mapping={'ip': 'ips'}) + self.assertItemsEqual( + list_input_single.args['ips'], + { + 'sample1': sample1.args['ip'], + 'sample2': sample2.args['ip'], + } + ) + + + def test_list_input_multi(self): + sample_meta_dir = self.make_resource_meta(""" +id: sample +handler: ansible +version: 1.0.0 +input: + ip: + port: + """) + list_input_multi_meta_dir = self.make_resource_meta(""" +id: list-input-multi +handler: ansible +version: 1.0.0 +input: + ips: + ports: +input-types: + ips: list + ports: list + """) + + sample1 = self.create_resource( + 'sample1', sample_meta_dir, {'ip': '10.0.0.1', 'port': '1000'} + ) + sample2 = self.create_resource( + 'sample2', sample_meta_dir, {'ip': '10.0.0.2', 'port': '1001'} + ) + list_input_multi = self.create_resource( + 'list-input-multi', list_input_multi_meta_dir, {'ips': {}, 'ports': {}} + ) + + xs.connect(sample1, list_input_multi, mapping={'ip': 'ips', 'port': 'ports'}) + self.assertItemsEqual( + list_input_multi.args['ips'], + { + 'sample1': sample1.args['ip'], + } + ) + self.assertItemsEqual( + list_input_multi.args['ports'], + { + 'sample1': sample1.args['port'], + } + ) + + xs.connect(sample2, list_input_multi, mapping={'ip': 'ips', 'port': 'ports'}) + self.assertItemsEqual( + list_input_multi.args['ips'], + { + 'sample1': sample1.args['ip'], + 'sample2': sample2.args['ip'], + } + ) + self.assertItemsEqual( + list_input_multi.args['ports'], + { + 'sample1': sample1.args['port'], + 'sample2': sample2.args['port'], + } + ) + + +if __name__ == '__main__': + unittest.main() From a8ae9cdf263e8f5ec1b85397282507368238a164 Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Wed, 22 Apr 2015 09:13:17 +0200 Subject: [PATCH 42/87] HAProxy deployment - Added ports mapping to Docker container - tests refactored --- haproxy_deployment/haproxy-deployment.yaml | 22 +-------- haproxy_deployment/haproxy_deployment.py | 4 ++ x/resources/docker_container/actions/run.yml | 20 +++++--- x/resources/docker_container/meta.yaml | 4 +- x/test/base.py | 36 ++++++++++++++ x/test/test_signals.py | 52 +++++++++----------- 6 files changed, 81 insertions(+), 57 deletions(-) create mode 100644 x/test/base.py diff --git a/haproxy_deployment/haproxy-deployment.yaml b/haproxy_deployment/haproxy-deployment.yaml index d755d17a..aac1d3b3 100755 --- a/haproxy_deployment/haproxy-deployment.yaml +++ b/haproxy_deployment/haproxy-deployment.yaml @@ -116,13 +116,6 @@ resources: ssh_user: ssh_key: - #- name: haproxy-config-container - # model: x/resources/data_container/ - # args: - # ip: - # image: haproxy-config - # export_volumes: - # - haproxy-config - name: haproxy-config model: x/resources/haproxy/ args: @@ -136,6 +129,7 @@ resources: args: ip: image: tutum/haproxy + ports: {} ssh_user: ssh_key: host_binds: {} @@ -185,10 +179,6 @@ connections: - emitter: node5 receiver: haproxy-config - #- emitter: node5 - # receiver: haproxy-config-container - #- emitter: haproxy-config-container - # receiver: haproxy-config - emitter: haproxy_keystone_config receiver: haproxy-config mapping: @@ -204,15 +194,7 @@ connections: receiver: haproxy mapping: ip: ip + configs_ports: ports ssh_user: ssh_user ssh_key: ssh_key config_dir: host_binds - - # HAProxy service - #- emitter: node5 - # receiver: haproxy - - #- emitter: haproxy-config - # receiver: haproxy - # mapping: - diff --git a/haproxy_deployment/haproxy_deployment.py b/haproxy_deployment/haproxy_deployment.py index d88e11ba..47dca192 100644 --- a/haproxy_deployment/haproxy_deployment.py +++ 
b/haproxy_deployment/haproxy_deployment.py @@ -92,6 +92,10 @@ class TestHAProxyDeployment(unittest.TestCase): }, haproxy.args['host_binds'] ) + self.assertItemsEqual( + haproxy.args['ports'], + haproxy_config.args['configs_ports'], + ) def main(): diff --git a/x/resources/docker_container/actions/run.yml b/x/resources/docker_container/actions/run.yml index e02c6e39..f767054e 100644 --- a/x/resources/docker_container/actions/run.yml +++ b/x/resources/docker_container/actions/run.yml @@ -9,11 +9,17 @@ name: {{ name }} image: {{ image }} state: running + ports: + {% for name, ports_dict in ports.items() %} + # TODO: this is ugly + # {{ name }} + - {{ ports_dict.values()[0] }}:{{ ports_dict.values()[0] }} + {% endfor %} volumes: - # TODO: host_binds might need more work - # Currently it's not that trivial to pass custom src: dst here - # (when a config variable is passed here from other resource) - # so we mount it to the same directory as on host - {% for emitter, bind in host_binds.items() %} - - {{ bind }}:{{ bind }} - {% endfor %} + # TODO: host_binds might need more work + # Currently it's not that trivial to pass custom src: dst here + # (when a config variable is passed here from other resource) + # so we mount it to the same directory as on host + {% for emitter, bind in host_binds.items() %} + - {{ bind }}:{{ bind }} + {% endfor %} diff --git a/x/resources/docker_container/meta.yaml b/x/resources/docker_container/meta.yaml index 8a104815..182c872c 100644 --- a/x/resources/docker_container/meta.yaml +++ b/x/resources/docker_container/meta.yaml @@ -3,11 +3,13 @@ handler: ansible version: 1.0.0 input: ip: - image: + image: + ports: host_binds: volume_binds: ssh_user: ssh_key: input-types: + ports: host_binds: list volume_binds: list diff --git a/x/test/base.py b/x/test/base.py new file mode 100644 index 00000000..6dbceb97 --- /dev/null +++ b/x/test/base.py @@ -0,0 +1,36 @@ +import os +import shutil +import tempfile +import unittest +import yaml + +from x import db +from x import resource as xr +from x import signals as xs + + +class BaseResourceTest(unittest.TestCase): + def setUp(self): + self.storage_dir = tempfile.mkdtemp() + + def tearDown(self): + shutil.rmtree(self.storage_dir) + db.clear() + xs.clear() + + def make_resource_meta(self, meta_yaml): + meta = yaml.load(meta_yaml) + + path = os.path.join(self.storage_dir, meta['id']) + os.makedirs(path) + with open(os.path.join(path, 'meta.yaml'), 'w') as f: + f.write(meta_yaml) + + return path + + def create_resource(self, name, src, args): + dst = os.path.join(self.storage_dir, 'rs', name) + os.makedirs(dst) + + return xr.create(name, src, dst, args) + diff --git a/x/test/test_signals.py b/x/test/test_signals.py index acf91a85..26c49ded 100644 --- a/x/test/test_signals.py +++ b/x/test/test_signals.py @@ -1,39 +1,34 @@ -import os -import shutil -import tempfile import unittest -import yaml -from x import db -from x import resource as xr +import base + from x import signals as xs -class TestListInput(unittest.TestCase): - def setUp(self): - self.storage_dir = tempfile.mkdtemp() +class TestBaseInput(base.BaseResourceTest): + def test_input_dict_type(self): + sample_meta_dir = self.make_resource_meta(""" +id: sample +handler: ansible +version: 1.0.0 +input: + values: {} + """) - def tearDown(self): - shutil.rmtree(self.storage_dir) - db.clear() - xs.clear() + sample1 = self.create_resource( + 'sample1', sample_meta_dir, {'values': {'a': 1, 'b': 2}} + ) + sample2 = self.create_resource( + 'sample2', sample_meta_dir, {'values': None} + ) + 
xs.connect(sample1, sample2) + self.assertItemsEqual( + sample1.args['values'], + sample2.args['values'], + ) - def make_resource_meta(self, meta_yaml): - meta = yaml.load(meta_yaml) - - path = os.path.join(self.storage_dir, meta['id']) - os.makedirs(path) - with open(os.path.join(path, 'meta.yaml'), 'w') as f: - f.write(meta_yaml) - - return path - - def create_resource(self, name, src, args): - dst = os.path.join(self.storage_dir, 'rs', name) - os.makedirs(dst) - - return xr.create(name, src, dst, args) +class TestListInput(base.BaseResourceTest): def test_list_input_single(self): sample_meta_dir = self.make_resource_meta(""" id: sample @@ -79,7 +74,6 @@ input-types: } ) - def test_list_input_multi(self): sample_meta_dir = self.make_resource_meta(""" id: sample From f18971c092e2234ab2a143fa4b3cef98ad3d064a Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Wed, 22 Apr 2015 10:02:19 +0200 Subject: [PATCH 43/87] Add listen_port to haproxy_config This simplifies port rendering logic in docker container --- haproxy.cfg | 2 +- haproxy_deployment/haproxy-deployment.yaml | 7 ++++++- haproxy_deployment/haproxy_deployment.py | 17 ++++++++++++----- x/resources/docker_container/actions/run.yml | 5 ++--- x/resources/haproxy/actions/run.yml | 1 + x/resources/haproxy/meta.yaml | 2 ++ x/resources/haproxy_config/meta.yaml | 1 + 7 files changed, 25 insertions(+), 10 deletions(-) diff --git a/haproxy.cfg b/haproxy.cfg index 93401c17..ea258a27 100644 --- a/haproxy.cfg +++ b/haproxy.cfg @@ -29,7 +29,7 @@ defaults # balance roundrobin {% for service in haproxy_services %} -listen {{ service['name'] }} 0.0.0.0:{{ service['servers'][0]['port'] }} +listen {{ service['name'] }} 0.0.0.0:{{ service['listen_port'] }} mode http stats enable stats uri /haproxy?stats diff --git a/haproxy_deployment/haproxy-deployment.yaml b/haproxy_deployment/haproxy-deployment.yaml index aac1d3b3..cd50dfc7 100755 --- a/haproxy_deployment/haproxy-deployment.yaml +++ b/haproxy_deployment/haproxy-deployment.yaml @@ -72,6 +72,7 @@ resources: model: x/resources/haproxy_config/ args: servers: {} + port: 5000 ports: {} ssh_user: ssh_key: @@ -112,6 +113,7 @@ resources: model: x/resources/haproxy_config/ args: servers: {} + port: 8774 ports: {} ssh_user: ssh_key: @@ -120,6 +122,7 @@ resources: model: x/resources/haproxy/ args: ip: + listen_ports: {} configs: {} configs_ports: {} ssh_user: @@ -182,11 +185,13 @@ connections: - emitter: haproxy_keystone_config receiver: haproxy-config mapping: + port: listen_ports ports: configs_ports servers: configs - emitter: haproxy_nova_config receiver: haproxy-config mapping: + port: listen_ports ports: configs_ports servers: configs @@ -194,7 +199,7 @@ connections: receiver: haproxy mapping: ip: ip - configs_ports: ports + listen_ports: ports ssh_user: ssh_user ssh_key: ssh_key config_dir: host_binds diff --git a/haproxy_deployment/haproxy_deployment.py b/haproxy_deployment/haproxy_deployment.py index 47dca192..21e32585 100644 --- a/haproxy_deployment/haproxy_deployment.py +++ b/haproxy_deployment/haproxy_deployment.py @@ -72,29 +72,36 @@ class TestHAProxyDeployment(unittest.TestCase): self.assertEqual(node5.args['ip'], haproxy.args['ip']) self.assertEqual(node5.args['ssh_key'], haproxy.args['ssh_key']) self.assertEqual(node5.args['ssh_user'], haproxy.args['ssh_user']) - self.assertItemsEqual( + self.assertDictEqual( haproxy_config.args['configs'], { 'haproxy_keystone_config': haproxy_keystone_config.args['servers'], 'haproxy_nova_config': haproxy_nova_config.args['servers'], } ) - 
self.assertItemsEqual( + self.assertDictEqual( haproxy_config.args['configs_ports'], { 'haproxy_keystone_config': haproxy_keystone_config.args['ports'], 'haproxy_nova_config': haproxy_nova_config.args['ports'], } ) - self.assertItemsEqual( + self.assertDictEqual( + haproxy_config.args['listen_ports'], + { + 'haproxy_keystone_config': haproxy_keystone_config.args['port'], + 'haproxy_nova_config': haproxy_nova_config.args['port'], + } + ) + self.assertDictEqual( { 'haproxy-config': haproxy_config.args['config_dir'], }, haproxy.args['host_binds'] ) - self.assertItemsEqual( + self.assertDictEqual( haproxy.args['ports'], - haproxy_config.args['configs_ports'], + haproxy_config.args['listen_ports'], ) diff --git a/x/resources/docker_container/actions/run.yml b/x/resources/docker_container/actions/run.yml index f767054e..f8ceab11 100644 --- a/x/resources/docker_container/actions/run.yml +++ b/x/resources/docker_container/actions/run.yml @@ -10,10 +10,9 @@ image: {{ image }} state: running ports: - {% for name, ports_dict in ports.items() %} - # TODO: this is ugly + {% for name, port in ports.items() %} # {{ name }} - - {{ ports_dict.values()[0] }}:{{ ports_dict.values()[0] }} + - {{ port }}:{{ port }} {% endfor %} volumes: # TODO: host_binds might need more work diff --git a/x/resources/haproxy/actions/run.yml b/x/resources/haproxy/actions/run.yml index 56dbdac8..7c4a9b35 100644 --- a/x/resources/haproxy/actions/run.yml +++ b/x/resources/haproxy/actions/run.yml @@ -7,6 +7,7 @@ haproxy_services: {% for service, servers in configs.items() %} - name: {{ service }} + listen_port: {{ listen_ports[service] }} servers: {% for name, ip in servers.items() %} - name: {{ name }} diff --git a/x/resources/haproxy/meta.yaml b/x/resources/haproxy/meta.yaml index 5459ee0f..e919f9af 100644 --- a/x/resources/haproxy/meta.yaml +++ b/x/resources/haproxy/meta.yaml @@ -4,10 +4,12 @@ version: 1.0.0 input: ip: config_dir: /etc/haproxy + listen_ports: configs: configs_ports: ssh_user: ssh_key: input-types: + listen_ports: list configs: list configs_ports: list diff --git a/x/resources/haproxy_config/meta.yaml b/x/resources/haproxy_config/meta.yaml index 48b2d171..593d41f1 100644 --- a/x/resources/haproxy_config/meta.yaml +++ b/x/resources/haproxy_config/meta.yaml @@ -2,6 +2,7 @@ id: haproxy_config handler: ansible version: 1.0.0 input: + port: ports: servers: input-types: From 8b2e7259caafc79ccdeb85cc57daab1956c7e114 Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Wed, 22 Apr 2015 10:14:00 +0200 Subject: [PATCH 44/87] HAProxy config listen_port name fix --- haproxy_deployment/haproxy-deployment.yaml | 8 ++++---- haproxy_deployment/haproxy_deployment.py | 4 ++-- x/resources/haproxy_config/meta.yaml | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/haproxy_deployment/haproxy-deployment.yaml b/haproxy_deployment/haproxy-deployment.yaml index cd50dfc7..78b57a10 100755 --- a/haproxy_deployment/haproxy-deployment.yaml +++ b/haproxy_deployment/haproxy-deployment.yaml @@ -72,7 +72,7 @@ resources: model: x/resources/haproxy_config/ args: servers: {} - port: 5000 + listen_port: 5000 ports: {} ssh_user: ssh_key: @@ -113,7 +113,7 @@ resources: model: x/resources/haproxy_config/ args: servers: {} - port: 8774 + listen_port: 8774 ports: {} ssh_user: ssh_key: @@ -185,13 +185,13 @@ connections: - emitter: haproxy_keystone_config receiver: haproxy-config mapping: - port: listen_ports + listen_port: listen_ports ports: configs_ports servers: configs - emitter: haproxy_nova_config receiver: haproxy-config 
mapping: - port: listen_ports + listen_port: listen_ports ports: configs_ports servers: configs diff --git a/haproxy_deployment/haproxy_deployment.py b/haproxy_deployment/haproxy_deployment.py index 21e32585..edf44cb9 100644 --- a/haproxy_deployment/haproxy_deployment.py +++ b/haproxy_deployment/haproxy_deployment.py @@ -89,8 +89,8 @@ class TestHAProxyDeployment(unittest.TestCase): self.assertDictEqual( haproxy_config.args['listen_ports'], { - 'haproxy_keystone_config': haproxy_keystone_config.args['port'], - 'haproxy_nova_config': haproxy_nova_config.args['port'], + 'haproxy_keystone_config': haproxy_keystone_config.args['listen_port'], + 'haproxy_nova_config': haproxy_nova_config.args['listen_port'], } ) self.assertDictEqual( diff --git a/x/resources/haproxy_config/meta.yaml b/x/resources/haproxy_config/meta.yaml index 593d41f1..3ad9355f 100644 --- a/x/resources/haproxy_config/meta.yaml +++ b/x/resources/haproxy_config/meta.yaml @@ -2,7 +2,7 @@ id: haproxy_config handler: ansible version: 1.0.0 input: - port: + listen_port: ports: servers: input-types: From 067922e71ef02656c24365c910e28330db830b66 Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Wed, 22 Apr 2015 10:23:59 +0200 Subject: [PATCH 45/87] Use graphviz to render graph instead of matplotlib It looks better --- cli.py | 24 +++++++++++++++--------- main.yml | 3 ++- 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/cli.py b/cli.py index d10dcbee..0477d01b 100644 --- a/cli.py +++ b/cli.py @@ -1,10 +1,11 @@ import click import json -import matplotlib -matplotlib.use('Agg') # don't show windows -import matplotlib.pyplot as plt +#import matplotlib +#matplotlib.use('Agg') # don't show windows +#import matplotlib.pyplot as plt import networkx as nx import os +import subprocess from x import actions as xa from x import deployment as xd @@ -146,12 +147,17 @@ def init_cli_connections(): @click.command() def graph(): g = xs.connection_graph() - pos = nx.spring_layout(g) - nx.draw_networkx_nodes(g, pos) - nx.draw_networkx_edges(g, pos, arrows=True) - nx.draw_networkx_labels(g, pos) - plt.axis('off') - plt.savefig('graph.png') + + nx.write_dot(g, 'graph.dot') + subprocess.call(['dot', '-Tps', 'graph.dot', '-o', 'graph.ps']) + + # Matplotlib + #pos = nx.spring_layout(g) + #nx.draw_networkx_nodes(g, pos) + #nx.draw_networkx_edges(g, pos, arrows=True) + #nx.draw_networkx_labels(g, pos) + #plt.axis('off') + #plt.savefig('graph.png') connections.add_command(graph) diff --git a/main.yml b/main.yml index e5876ba2..b5673080 100644 --- a/main.yml +++ b/main.yml @@ -16,7 +16,8 @@ - shell: pip install -r /vagrant/requirements.txt # Graph drawing - - apt: name=python-matplotlib state=present + #- apt: name=python-matplotlib state=present + - apt: name=python-graphviz state=present # Setup development env for solar #- shell: python setup.py develop chdir=/vagrant/solar From c5c8f2d754eb4b8aee5d52efe09c23cd402b8c4f Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Wed, 22 Apr 2015 10:42:25 +0200 Subject: [PATCH 46/87] Generated detailed connection graph, also output to PNG --- cli.py | 5 +++-- x/signals.py | 14 ++++++++++++++ 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/cli.py b/cli.py index 0477d01b..c0f72eef 100644 --- a/cli.py +++ b/cli.py @@ -146,10 +146,11 @@ def init_cli_connections(): # TODO: this requires graphing libraries @click.command() def graph(): - g = xs.connection_graph() + #g = xs.connection_graph() + g = xs.detailed_connection_graph() nx.write_dot(g, 'graph.dot') - subprocess.call(['dot', 
'-Tps', 'graph.dot', '-o', 'graph.ps']) + subprocess.call(['dot', '-Tpng', 'graph.dot', '-o', 'graph.png']) # Matplotlib #pos = nx.spring_layout(g) diff --git a/x/signals.py b/x/signals.py index e5827962..7e6270c7 100644 --- a/x/signals.py +++ b/x/signals.py @@ -166,3 +166,17 @@ def connection_graph(): ) return g + + +def detailed_connection_graph(): + g = nx.MultiDiGraph() + + for emitter_name, destination_values in CLIENTS.items(): + for emitter_input, receivers in CLIENTS[emitter_name].items(): + for receiver_name, receiver_input in receivers: + label = emitter_input + if emitter_input != receiver_input: + label = '{}:{}'.format(emitter_input, receiver_input) + g.add_edge(emitter_name, receiver_name, label=label) + + return g From 40e36d2bc4cbc6e562c355da06245e627db18086 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?= Date: Wed, 22 Apr 2015 09:42:34 +0000 Subject: [PATCH 47/87] Add mariadb db and user resources --- x/resources/mariadb_table/meta.yaml | 10 ---------- x/resources/mariadb_user/actions/remove.yml | 11 +++++++++++ x/resources/mariadb_user/actions/run.yml | 13 +++++++++++++ x/resources/mariadb_user/meta.yaml | 10 ++++++++-- 4 files changed, 32 insertions(+), 12 deletions(-) delete mode 100644 x/resources/mariadb_table/meta.yaml create mode 100644 x/resources/mariadb_user/actions/remove.yml create mode 100644 x/resources/mariadb_user/actions/run.yml diff --git a/x/resources/mariadb_table/meta.yaml b/x/resources/mariadb_table/meta.yaml deleted file mode 100644 index 40b92c47..00000000 --- a/x/resources/mariadb_table/meta.yaml +++ /dev/null @@ -1,10 +0,0 @@ -id: mariadb_user -handler: ansible -version: 1.0.0 -actions: - run: run.yml - remove: remove.yml -input: - name: name - password: password - users: [] diff --git a/x/resources/mariadb_user/actions/remove.yml b/x/resources/mariadb_user/actions/remove.yml new file mode 100644 index 00000000..9df0be6c --- /dev/null +++ b/x/resources/mariadb_user/actions/remove.yml @@ -0,0 +1,11 @@ +- hosts: [{{ ip }}] + sudo: yes + tasks: + - name: mariadb user + mysql_user: + name: {{name}} + state: absent + login_user: root + login_password: {{login_password}} + login_port: {{login_port}} + login_host: 127.0.0.1 diff --git a/x/resources/mariadb_user/actions/run.yml b/x/resources/mariadb_user/actions/run.yml new file mode 100644 index 00000000..7f50378a --- /dev/null +++ b/x/resources/mariadb_user/actions/run.yml @@ -0,0 +1,13 @@ +- hosts: [{{ ip }}] + sudo: yes + tasks: + - name: mariadb user + mysql_user: + name: {{name}} + password: {{password}} + priv: {{db}}.*:ALL + state: present + login_user: root + login_password: {{login_password}} + login_port: {{login_port}} + login_host: 127.0.0.1 diff --git a/x/resources/mariadb_user/meta.yaml b/x/resources/mariadb_user/meta.yaml index db859484..2d5a34aa 100644 --- a/x/resources/mariadb_user/meta.yaml +++ b/x/resources/mariadb_user/meta.yaml @@ -5,5 +5,11 @@ actions: run: run.yml remove: remove.yml input: - name: name - password: password + password: + db: + login_password: + login_port: + login_user: + ip: + ssh_key: + ssh_user: From ecd9e3bda23843e66cc356891ce0582bbb40c6e8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?= Date: Wed, 22 Apr 2015 09:50:20 +0000 Subject: [PATCH 48/87] Mariadb resource --- x/resources/mariadb_db/actions/remove.yml | 11 +++++++++++ x/resources/mariadb_db/actions/run.yml | 11 +++++++++++ x/resources/mariadb_db/meta.yaml | 13 +++++++++++++ 3 files changed, 35 insertions(+) create mode 100644 x/resources/mariadb_db/actions/remove.yml 
create mode 100644 x/resources/mariadb_db/actions/run.yml create mode 100644 x/resources/mariadb_db/meta.yaml diff --git a/x/resources/mariadb_db/actions/remove.yml b/x/resources/mariadb_db/actions/remove.yml new file mode 100644 index 00000000..fe6d6488 --- /dev/null +++ b/x/resources/mariadb_db/actions/remove.yml @@ -0,0 +1,11 @@ +- hosts: [{{ ip }}] + sudo: yes + tasks: + - name: mariadb db + mysql_db: + name: {{name}} + state: absent + login_user: root + login_password: {{login_password}} + login_port: {{login_port}} + login_host: 127.0.0.1 diff --git a/x/resources/mariadb_db/actions/run.yml b/x/resources/mariadb_db/actions/run.yml new file mode 100644 index 00000000..fda96b5b --- /dev/null +++ b/x/resources/mariadb_db/actions/run.yml @@ -0,0 +1,11 @@ +- hosts: [{{ ip }}] + sudo: yes + tasks: + - name: mariadb db + mysql_db: + name: {{name}} + state: present + login_user: root + login_password: {{login_password}} + login_port: {{login_port}} + login_host: 127.0.0.1 diff --git a/x/resources/mariadb_db/meta.yaml b/x/resources/mariadb_db/meta.yaml new file mode 100644 index 00000000..6bd49b7d --- /dev/null +++ b/x/resources/mariadb_db/meta.yaml @@ -0,0 +1,13 @@ +id: mariadb_table +handler: ansible +version: 1.0.0 +actions: + run: run.yml + remove: remove.yml +input: + login_password: + login_port: + login_user: + ip: + ssh_key: + ssh_user: From e8f74b9cea3b2a7b23992b001c3df0aa83b7529f Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Wed, 22 Apr 2015 13:47:15 +0200 Subject: [PATCH 49/87] Old tests pass now --- simple-deployment.yaml | 43 +++++++++++++++++ x/observer.py | 106 +++++++++++++++++++++++++++++++++++++++++ x/resource.py | 60 +++++++++++++++-------- x/signals.py | 25 ++++++++-- x/test/test_signals.py | 34 ++++++------- 5 files changed, 229 insertions(+), 39 deletions(-) create mode 100755 simple-deployment.yaml create mode 100644 x/observer.py diff --git a/simple-deployment.yaml b/simple-deployment.yaml new file mode 100755 index 00000000..7360183d --- /dev/null +++ b/simple-deployment.yaml @@ -0,0 +1,43 @@ +# HAProxy deployment with MariaDB, Keystone and Nova + +workdir: /vagrant +resource-save-path: rs/ +#test-suite: haproxy_deployment.haproxy_deployment + +resources: + - name: node1 + model: x/resources/ro_node/ + args: + ip: 10.0.0.3 + ssh_key: /vagrant/.vagrant/machines/solar-dev2/virtualbox/private_key + ssh_user: vagrant + + - name: keystone1 + model: x/resources/keystone/ + args: + ip: + image: TEST + ssh_user: + ssh_key: + + - name: haproxy_keystone_config + model: x/resources/haproxy_config/ + args: + listen_port: 5000 + ports: {} + servers: {} + + +connections: + - emitter: node1 + receiver: keystone1 + + # Multiple subscription test + - emitter: node1 + receiver: keystone1 + + - emitter: keystone1 + receiver: haproxy_keystone_config + mapping: + ip: servers + port: ports diff --git a/x/observer.py b/x/observer.py new file mode 100644 index 00000000..a8d0c546 --- /dev/null +++ b/x/observer.py @@ -0,0 +1,106 @@ +class BaseObserver(object): + type_ = None + + def __init__(self, attached_to, name, value): + """ + :param attached_to: resource.Resource + :param name: + :param value: + :return: + """ + self.attached_to = attached_to + self.name = name + self.value = value + self.receivers = [] + + def log(self, msg): + print '{} {}'.format(self, msg) + + def __repr__(self): + return '[{}:{}]'.format(self.attached_to.name, self.name) + + def notify(self, emitter): + """ + :param emitter: Observer + :return: + """ + raise NotImplementedError + + def update(self, value): 
+ """ + :param value: + :return: + """ + raise NotImplementedError + + def subscribe(self, receiver): + """ + :param receiver: Observer + :return: + """ + self.log('Subscribe {}'.format(receiver)) + # No multiple subscriptions + fltr = [r for r in self.receivers + if r.attached_to == receiver.attached_to + and r.name == receiver.name] + if fltr: + self.log('No multiple subscriptions from {}'.format(receiver)) + return + self.receivers.append(receiver) + receiver.notify(self) + + def unsubscribe(self, receiver): + """ + :param receiver: Observer + :return: + """ + self.log('Unsubscribe {}'.format(receiver)) + self.receivers.remove(receiver) + # TODO: ? + #receiver.notify(self) + + +class Observer(BaseObserver): + type_ = 'simple' + + def __init__(self, *args, **kwargs): + super(Observer, self).__init__(*args, **kwargs) + # TODO: + # Simple observer can be attached to at most one emitter + self.emitter = None + + def notify(self, emitter): + self.log('Notify from {} value {}'.format(emitter, emitter.value)) + self.value = emitter.value + for receiver in self.receivers: + receiver.notify(self) + self.attached_to.save() + + def update(self, value): + self.log('Updating to value {}'.format(value)) + self.value = value + for receiver in self.receivers: + receiver.notify(self) + self.attached_to.save() + + def subscribe(self, receiver): + # TODO: + super(Observer, self).subscribe(receiver) + + +class ListObserver(BaseObserver): + type_ = 'list' + + def notify(self, emitter): + self.log('Notify from {} value {}'.format(emitter, emitter.value)) + self.value[emitter.attached_to.name] = emitter.value + for receiver in self.receivers: + receiver.notify(self) + self.attached_to.save() + + +def create(type_, *args, **kwargs): + for klass in BaseObserver.__subclasses__(): + if klass.type_ == type_: + return klass(*args, **kwargs) + raise NotImplementedError('No handling class for type {}'.format(type_)) diff --git a/x/resource.py b/x/resource.py index 895105d1..8603bf12 100644 --- a/x/resource.py +++ b/x/resource.py @@ -1,14 +1,15 @@ # -*- coding: UTF-8 -*- +import copy import json import os import shutil import yaml -import actions -import signals -import db - +from x import actions +from x import db +from x import observer +from x import signals from x import utils @@ -20,7 +21,10 @@ class Resource(object): self.actions = metadata['actions'].keys() if metadata['actions'] else None self.requires = metadata['input'].keys() self._validate_args(args, metadata['input']) - self.args = args + self.args = {} + for arg_name, arg_value in args.items(): + type_ = metadata.get('input-types', {}).get(arg_name, 'simple') + self.args[arg_name] = observer.create(type_, self, arg_name, arg_value) self.metadata['input'] = args self.input_types = metadata.get('input-types', {}) self.changed = [] @@ -30,10 +34,13 @@ class Resource(object): return ("Resource('name={0}', metadata={1}, args={2}, " "base_dir='{3}', tags={4})").format(self.name, json.dumps(self.metadata), - json.dumps(self.args), + json.dumps(self.args_dict()), self.base_dir, self.tags) + def args_dict(self): + return {k: v.value for k, v in self.args.items()} + def add_tag(self, tag): if tag not in self.tags: self.tags.append(tag) @@ -44,18 +51,27 @@ class Resource(object): except ValueError: pass - def update(self, args, emitter=None): - for key, value in args.iteritems(): - if self.input_types.get(key, '') == 'list': - if emitter is None: - raise Exception('I need to know the emitter when updating input of list type') - self.args[key][emitter.name] = value 
- else: - self.args[key] = value - self.changed.append(key) - signals.notify(self, key, value) + def notify(self, emitter): + """Update resource's args from emitter's args. - self.save() + :param emitter: Resource + :return: + """ + for key, value in emitter.args.iteritems(): + self.args[key].notify(value) + + def update(self, args): + """This method updates resource's args with a simple dict. + + :param args: + :return: + """ + # Update will be blocked if this resource is listening + # on some input that is to be updated -- we should only listen + # to the emitter and not be able to change the input's value + + for key, value in args.iteritems(): + self.args[key].update(value) def action(self, action): if action in self.actions: @@ -75,11 +91,14 @@ class Resource(object): # TODO: versioning def save(self): - self.metadata['tags'] = self.tags + metadata = copy.deepcopy(self.metadata) + + metadata['tags'] = self.tags + metadata['args'] = self.args_dict() meta_file = os.path.join(self.base_dir, 'meta.yaml') with open(meta_file, 'w') as f: - f.write(yaml.dump(self.metadata)) + f.write(yaml.dump(metadata)) def create(name, base_path, dest_path, args, connections={}): @@ -111,6 +130,7 @@ def create(name, base_path, dest_path, args, connections={}): shutil.copytree(base_path, dest_path) resource.save() db.resource_add(name, resource) + return resource @@ -136,4 +156,6 @@ def load_all(dest_path): resource = load(resource_path) ret[resource.name] = resource + signals.reconnect_all() + return ret diff --git a/x/signals.py b/x/signals.py index 7e6270c7..6679831a 100644 --- a/x/signals.py +++ b/x/signals.py @@ -62,17 +62,36 @@ def connect(emitter, receiver, mapping=None): connect_src_dst(emitter, src, receiver, dst) receiver.save() - utils.save_to_config_file(CLIENTS_CONFIG_KEY, CLIENTS) def connect_src_dst(emitter, src, receiver, dst): + if src not in emitter.args: + return + CLIENTS.setdefault(emitter.name, {}) CLIENTS[emitter.name].setdefault(src, []) CLIENTS[emitter.name][src].append((receiver.name, dst)) + emitter.args[src].subscribe(receiver.args[dst]) + # Copy emitter's values to receiver - if src in emitter.args: - receiver.update({dst: emitter.args[src]}, emitter=emitter) + #receiver.update({dst: emitter.args[src]}, emitter=emitter) + + utils.save_to_config_file(CLIENTS_CONFIG_KEY, CLIENTS) + + +def reconnect_all(): + """Reconstruct connections for resource inputs from CLIENTS. 
+ + :return: + """ + for emitter_name, dest_dict in CLIENTS.items(): + emitter = db.get_resource(emitter_name) + for emitter_input, destinations in dest_dict.items(): + for receiver_name, receiver_input in destinations: + receiver = db.get_resource(receiver_name) + receiver.args[receiver_input].subscribe( + emitter.args[emitter_input]) def disconnect(emitter, receiver): diff --git a/x/test/test_signals.py b/x/test/test_signals.py index 26c49ded..c5ef67da 100644 --- a/x/test/test_signals.py +++ b/x/test/test_signals.py @@ -23,8 +23,8 @@ input: ) xs.connect(sample1, sample2) self.assertItemsEqual( - sample1.args['values'], - sample2.args['values'], + sample1.args['values'].value, + sample2.args['values'].value, ) @@ -59,18 +59,18 @@ input-types: xs.connect(sample1, list_input_single, mapping={'ip': 'ips'}) self.assertItemsEqual( - list_input_single.args['ips'], + list_input_single.args['ips'].value, { - 'sample1': sample1.args['ip'], + 'sample1': sample1.args['ip'].value, } ) xs.connect(sample2, list_input_single, mapping={'ip': 'ips'}) self.assertItemsEqual( - list_input_single.args['ips'], + list_input_single.args['ips'].value, { - 'sample1': sample1.args['ip'], - 'sample2': sample2.args['ip'], + 'sample1': sample1.args['ip'].value, + 'sample2': sample2.args['ip'].value, } ) @@ -107,31 +107,31 @@ input-types: xs.connect(sample1, list_input_multi, mapping={'ip': 'ips', 'port': 'ports'}) self.assertItemsEqual( - list_input_multi.args['ips'], + list_input_multi.args['ips'].value, { - 'sample1': sample1.args['ip'], + 'sample1': sample1.args['ip'].value, } ) self.assertItemsEqual( - list_input_multi.args['ports'], + list_input_multi.args['ports'].value, { - 'sample1': sample1.args['port'], + 'sample1': sample1.args['port'].value, } ) xs.connect(sample2, list_input_multi, mapping={'ip': 'ips', 'port': 'ports'}) self.assertItemsEqual( - list_input_multi.args['ips'], + list_input_multi.args['ips'].value, { - 'sample1': sample1.args['ip'], - 'sample2': sample2.args['ip'], + 'sample1': sample1.args['ip'].value, + 'sample2': sample2.args['ip'].value, } ) self.assertItemsEqual( - list_input_multi.args['ports'], + list_input_multi.args['ports'].value, { - 'sample1': sample1.args['port'], - 'sample2': sample2.args['port'], + 'sample1': sample1.args['port'].value, + 'sample2': sample2.args['port'].value, } ) From 67cc5d6871f380fa45f32b0cded0f82145401d2f Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Wed, 22 Apr 2015 14:02:49 +0200 Subject: [PATCH 50/87] Disconnect fix & test --- x/observer.py | 15 ++++++++++----- x/signals.py | 13 +++---------- x/test/test_signals.py | 37 ++++++++++++++++++++++++++++++------- 3 files changed, 43 insertions(+), 22 deletions(-) diff --git a/x/observer.py b/x/observer.py index a8d0c546..416dd0b8 100644 --- a/x/observer.py +++ b/x/observer.py @@ -33,6 +33,13 @@ class BaseObserver(object): """ raise NotImplementedError + def find_receiver(self, receiver): + fltr = [r for r in self.receivers + if r.attached_to == receiver.attached_to + and r.name == receiver.name] + if fltr: + return fltr[0] + def subscribe(self, receiver): """ :param receiver: Observer @@ -40,10 +47,7 @@ class BaseObserver(object): """ self.log('Subscribe {}'.format(receiver)) # No multiple subscriptions - fltr = [r for r in self.receivers - if r.attached_to == receiver.attached_to - and r.name == receiver.name] - if fltr: + if self.find_receiver(receiver): self.log('No multiple subscriptions from {}'.format(receiver)) return self.receivers.append(receiver) @@ -55,7 +59,8 @@ class 
BaseObserver(object): :return: """ self.log('Unsubscribe {}'.format(receiver)) - self.receivers.remove(receiver) + if self.find_receiver(receiver): + self.receivers.remove(receiver) # TODO: ? #receiver.notify(self) diff --git a/x/signals.py b/x/signals.py index 6679831a..e7ffada7 100644 --- a/x/signals.py +++ b/x/signals.py @@ -56,7 +56,7 @@ def connect(emitter, receiver, mapping=None): for src, dst in mapping.items(): # Disconnect all receiver inputs # Check if receiver input is of list type first - if receiver.input_types.get(dst, '') != 'list': + if receiver.args[dst].type_ != 'list': disconnect_receiver_by_input(receiver, dst) connect_src_dst(emitter, src, receiver, dst) @@ -103,18 +103,11 @@ def disconnect(emitter, receiver): for destination in destinations: receiver_input = destination[1] - if receiver.input_types.get(receiver_input, '') == 'list': + if receiver.args[receiver_input].type_ != 'list': print 'Removing input {} from {}'.format(receiver_input, receiver.name) - # TODO: update here? We're deleting an input... - receiver.args[receiver_input] = { - k: v for k, v in receiver.args.get(receiver_input, {}).items() - if k != emitter.name - } - - disconnect_by_src(emitter, src, receiver) + emitter.args[src].unsubscribe(receiver.args[receiver_input]) # Inputs might have changed - receiver.save() utils.save_to_config_file(CLIENTS_CONFIG_KEY, CLIENTS) diff --git a/x/test/test_signals.py b/x/test/test_signals.py index c5ef67da..6371d68d 100644 --- a/x/test/test_signals.py +++ b/x/test/test_signals.py @@ -22,11 +22,34 @@ input: 'sample2', sample_meta_dir, {'values': None} ) xs.connect(sample1, sample2) - self.assertItemsEqual( + self.assertDictEqual( sample1.args['values'].value, sample2.args['values'].value, ) + # Check update + sample1.update({'values': {'a': 2}}) + self.assertDictEqual( + sample1.args['values'].value, + {'a': 2} + ) + self.assertDictEqual( + sample1.args['values'].value, + sample2.args['values'].value, + ) + + # Check disconnect + xs.disconnect(sample1, sample2) + sample1.update({'values': {'a': 3}}) + self.assertDictEqual( + sample1.args['values'].value, + {'a': 3} + ) + self.assertDictEqual( + sample2.args['values'].value, + {'a': 2} + ) + class TestListInput(base.BaseResourceTest): def test_list_input_single(self): @@ -58,7 +81,7 @@ input-types: ) xs.connect(sample1, list_input_single, mapping={'ip': 'ips'}) - self.assertItemsEqual( + self.assertDictEqual( list_input_single.args['ips'].value, { 'sample1': sample1.args['ip'].value, @@ -66,7 +89,7 @@ input-types: ) xs.connect(sample2, list_input_single, mapping={'ip': 'ips'}) - self.assertItemsEqual( + self.assertDictEqual( list_input_single.args['ips'].value, { 'sample1': sample1.args['ip'].value, @@ -106,13 +129,13 @@ input-types: ) xs.connect(sample1, list_input_multi, mapping={'ip': 'ips', 'port': 'ports'}) - self.assertItemsEqual( + self.assertDictEqual( list_input_multi.args['ips'].value, { 'sample1': sample1.args['ip'].value, } ) - self.assertItemsEqual( + self.assertDictEqual( list_input_multi.args['ports'].value, { 'sample1': sample1.args['port'].value, @@ -120,14 +143,14 @@ input-types: ) xs.connect(sample2, list_input_multi, mapping={'ip': 'ips', 'port': 'ports'}) - self.assertItemsEqual( + self.assertDictEqual( list_input_multi.args['ips'].value, { 'sample1': sample1.args['ip'].value, 'sample2': sample2.args['ip'].value, } ) - self.assertItemsEqual( + self.assertDictEqual( list_input_multi.args['ports'].value, { 'sample1': sample1.args['port'].value, From 5e0d6e27fc4018ea420d259516343512855c7e66 Mon Sep 
17 00:00:00 2001 From: Przemyslaw Kaminski Date: Wed, 22 Apr 2015 14:11:26 +0200 Subject: [PATCH 51/87] Fix unsubscribing for ListObserver --- x/observer.py | 18 ++++++++++++++++++ x/signals.py | 3 --- x/test/test_signals.py | 10 ++++++++++ 3 files changed, 28 insertions(+), 3 deletions(-) diff --git a/x/observer.py b/x/observer.py index 416dd0b8..ae8a92d2 100644 --- a/x/observer.py +++ b/x/observer.py @@ -51,8 +51,12 @@ class BaseObserver(object): self.log('No multiple subscriptions from {}'.format(receiver)) return self.receivers.append(receiver) + receiver.subscribed(self) receiver.notify(self) + def subscribed(self, emitter): + self.log('Subscribed {}'.format(emitter)) + def unsubscribe(self, receiver): """ :param receiver: Observer @@ -61,9 +65,13 @@ class BaseObserver(object): self.log('Unsubscribe {}'.format(receiver)) if self.find_receiver(receiver): self.receivers.remove(receiver) + receiver.unsubscribed(self) # TODO: ? #receiver.notify(self) + def unsubscribed(self, emitter): + self.log('Unsubscribed {}'.format(emitter)) + class Observer(BaseObserver): type_ = 'simple' @@ -76,6 +84,7 @@ class Observer(BaseObserver): def notify(self, emitter): self.log('Notify from {} value {}'.format(emitter, emitter.value)) + # Copy emitter's values to receiver self.value = emitter.value for receiver in self.receivers: receiver.notify(self) @@ -98,11 +107,20 @@ class ListObserver(BaseObserver): def notify(self, emitter): self.log('Notify from {} value {}'.format(emitter, emitter.value)) + # Copy emitter's values to receiver self.value[emitter.attached_to.name] = emitter.value for receiver in self.receivers: receiver.notify(self) self.attached_to.save() + def unsubscribed(self, emitter): + """ + :param receiver: Observer + :return: + """ + self.log('Unsubscribed emitter {}'.format(emitter)) + self.value.pop(emitter.attached_to.name) + def create(type_, *args, **kwargs): for klass in BaseObserver.__subclasses__(): diff --git a/x/signals.py b/x/signals.py index e7ffada7..5eac2817 100644 --- a/x/signals.py +++ b/x/signals.py @@ -74,9 +74,6 @@ def connect_src_dst(emitter, src, receiver, dst): emitter.args[src].subscribe(receiver.args[dst]) - # Copy emitter's values to receiver - #receiver.update({dst: emitter.args[src]}, emitter=emitter) - utils.save_to_config_file(CLIENTS_CONFIG_KEY, CLIENTS) diff --git a/x/test/test_signals.py b/x/test/test_signals.py index 6371d68d..6601c43c 100644 --- a/x/test/test_signals.py +++ b/x/test/test_signals.py @@ -39,6 +39,7 @@ input: ) # Check disconnect + # TODO: should sample2.value be reverted to original value? 
xs.disconnect(sample1, sample2) sample1.update({'values': {'a': 3}}) self.assertDictEqual( @@ -97,6 +98,15 @@ input-types: } ) + # Test disconnect + xs.disconnect(sample2, list_input_single) + self.assertDictEqual( + list_input_single.args['ips'].value, + { + 'sample1': sample1.args['ip'].value, + } + ) + def test_list_input_multi(self): sample_meta_dir = self.make_resource_meta(""" id: sample From fdd756f44245cc41b74275998cda311298f34505 Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Wed, 22 Apr 2015 14:14:30 +0200 Subject: [PATCH 52/87] Small disconnect refactor --- x/signals.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/x/signals.py b/x/signals.py index 5eac2817..70ac721d 100644 --- a/x/signals.py +++ b/x/signals.py @@ -93,10 +93,7 @@ def reconnect_all(): def disconnect(emitter, receiver): for src, destinations in CLIENTS[emitter.name].items(): - destinations = [ - destination for destination in destinations - if destination[0] == receiver.name - ] + disconnect_by_src(emitter, src, receiver) for destination in destinations: receiver_input = destination[1] @@ -104,9 +101,6 @@ def disconnect(emitter, receiver): print 'Removing input {} from {}'.format(receiver_input, receiver.name) emitter.args[src].unsubscribe(receiver.args[receiver_input]) - # Inputs might have changed - utils.save_to_config_file(CLIENTS_CONFIG_KEY, CLIENTS) - def disconnect_receiver_by_input(receiver, input): """Find receiver connection by input and disconnect it. @@ -127,6 +121,8 @@ def disconnect_by_src(emitter, src, receiver): if destination[0] != receiver.name ] + utils.save_to_config_file(CLIENTS_CONFIG_KEY, CLIENTS) + def notify(source, key, value): CLIENTS.setdefault(source.name, {}) From 2f960229ab7c426f6dfa6ddcc21e6a0cc2f4121d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?= Date: Wed, 22 Apr 2015 16:21:46 +0000 Subject: [PATCH 53/87] Keystone resource and action run --- x/resources/keystone/actions/run.yml | 13 ++++++++++--- x/resources/keystone/meta.yaml | 8 ++++++-- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/x/resources/keystone/actions/run.yml b/x/resources/keystone/actions/run.yml index e223fe8f..7146e52d 100644 --- a/x/resources/keystone/actions/run.yml +++ b/x/resources/keystone/actions/run.yml @@ -1,6 +1,13 @@ -# TODO - hosts: [{{ ip }}] sudo: yes tasks: - - shell: docker run -d --net="host" --privileged \ - --name {{ name }} {{ image }} + - name: keystone container + docker: + name: {{ name }} + image: {{ image }} + state: running + ports: + - {{ port }}:5000 + - {{ admin_port }}:35357 + volumnes: + - {{ config_dir }}:/etc/keystone diff --git a/x/resources/keystone/meta.yaml b/x/resources/keystone/meta.yaml index 404e2c7e..fbc98de0 100644 --- a/x/resources/keystone/meta.yaml +++ b/x/resources/keystone/meta.yaml @@ -2,6 +2,10 @@ id: keystone handler: ansible version: 1.0.0 input: + image: kollaglue/centos-rdo-keystone + config_dir: + admin_port: + port: ip: - port: 5000 - image: garland/docker-openstack-keystone + ssh_key: + ssh_user: From 974761fe30d27147388349407e036962aa79c609 Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Wed, 22 Apr 2015 20:28:40 +0200 Subject: [PATCH 54/87] Test fixes, __eq__ added to Observer --- haproxy_deployment/haproxy_deployment.py | 18 +++--- x/observer.py | 6 ++ x/resource.py | 2 +- x/test/test_signals.py | 72 ++++++++++++------------ 4 files changed, 52 insertions(+), 46 deletions(-) diff --git a/haproxy_deployment/haproxy_deployment.py b/haproxy_deployment/haproxy_deployment.py 
index edf44cb9..99e547a7 100644 --- a/haproxy_deployment/haproxy_deployment.py +++ b/haproxy_deployment/haproxy_deployment.py @@ -18,14 +18,14 @@ class TestHAProxyDeployment(unittest.TestCase): keystone2 = db.get_resource('keystone2') haproxy_keystone_config = db.get_resource('haproxy_keystone_config') - self.assertDictEqual( + self.assertEqual( haproxy_keystone_config.args['servers'], { 'keystone1': keystone1.args['ip'], 'keystone2': keystone2.args['ip'], } ) - self.assertDictEqual( + self.assertEqual( haproxy_keystone_config.args['ports'], { 'keystone1': keystone1.args['port'], @@ -47,14 +47,14 @@ class TestHAProxyDeployment(unittest.TestCase): nova2 = db.get_resource('nova2') haproxy_nova_config = db.get_resource('haproxy_nova_config') - self.assertDictEqual( + self.assertEqual( haproxy_nova_config.args['servers'], { 'nova1': nova1.args['ip'], 'nova2': nova2.args['ip'], } ) - self.assertDictEqual( + self.assertEqual( haproxy_nova_config.args['ports'], { 'nova1': nova1.args['port'], @@ -72,34 +72,34 @@ class TestHAProxyDeployment(unittest.TestCase): self.assertEqual(node5.args['ip'], haproxy.args['ip']) self.assertEqual(node5.args['ssh_key'], haproxy.args['ssh_key']) self.assertEqual(node5.args['ssh_user'], haproxy.args['ssh_user']) - self.assertDictEqual( + self.assertEqual( haproxy_config.args['configs'], { 'haproxy_keystone_config': haproxy_keystone_config.args['servers'], 'haproxy_nova_config': haproxy_nova_config.args['servers'], } ) - self.assertDictEqual( + self.assertEqual( haproxy_config.args['configs_ports'], { 'haproxy_keystone_config': haproxy_keystone_config.args['ports'], 'haproxy_nova_config': haproxy_nova_config.args['ports'], } ) - self.assertDictEqual( + self.assertEqual( haproxy_config.args['listen_ports'], { 'haproxy_keystone_config': haproxy_keystone_config.args['listen_port'], 'haproxy_nova_config': haproxy_nova_config.args['listen_port'], } ) - self.assertDictEqual( + self.assertEqual( { 'haproxy-config': haproxy_config.args['config_dir'], }, haproxy.args['host_binds'] ) - self.assertDictEqual( + self.assertEqual( haproxy.args['ports'], haproxy_config.args['listen_ports'], ) diff --git a/x/observer.py b/x/observer.py index ae8a92d2..185eec16 100644 --- a/x/observer.py +++ b/x/observer.py @@ -19,6 +19,12 @@ class BaseObserver(object): def __repr__(self): return '[{}:{}]'.format(self.attached_to.name, self.name) + def __eq__(self, other): + if isinstance(other, BaseObserver): + return self.value == other.value + + return self.value == other + def notify(self, emitter): """ :param emitter: Observer diff --git a/x/resource.py b/x/resource.py index 8603bf12..6b3c16f2 100644 --- a/x/resource.py +++ b/x/resource.py @@ -23,7 +23,7 @@ class Resource(object): self._validate_args(args, metadata['input']) self.args = {} for arg_name, arg_value in args.items(): - type_ = metadata.get('input-types', {}).get(arg_name, 'simple') + type_ = metadata.get('input-types', {}).get(arg_name) or 'simple' self.args[arg_name] = observer.create(type_, self, arg_name, arg_value) self.metadata['input'] = args self.input_types = metadata.get('input-types', {}) diff --git a/x/test/test_signals.py b/x/test/test_signals.py index 6601c43c..ea490eba 100644 --- a/x/test/test_signals.py +++ b/x/test/test_signals.py @@ -22,32 +22,32 @@ input: 'sample2', sample_meta_dir, {'values': None} ) xs.connect(sample1, sample2) - self.assertDictEqual( - sample1.args['values'].value, - sample2.args['values'].value, + self.assertEqual( + sample1.args['values'], + sample2.args['values'], ) # Check update 
sample1.update({'values': {'a': 2}}) - self.assertDictEqual( - sample1.args['values'].value, + self.assertEqual( + sample1.args['values'], {'a': 2} ) - self.assertDictEqual( - sample1.args['values'].value, - sample2.args['values'].value, + self.assertEqual( + sample1.args['values'], + sample2.args['values'], ) # Check disconnect # TODO: should sample2.value be reverted to original value? xs.disconnect(sample1, sample2) sample1.update({'values': {'a': 3}}) - self.assertDictEqual( - sample1.args['values'].value, + self.assertEqual( + sample1.args['values'], {'a': 3} ) - self.assertDictEqual( - sample2.args['values'].value, + self.assertEqual( + sample2.args['values'], {'a': 2} ) @@ -82,28 +82,28 @@ input-types: ) xs.connect(sample1, list_input_single, mapping={'ip': 'ips'}) - self.assertDictEqual( - list_input_single.args['ips'].value, + self.assertEqual( + list_input_single.args['ips'], { - 'sample1': sample1.args['ip'].value, + 'sample1': sample1.args['ip'], } ) xs.connect(sample2, list_input_single, mapping={'ip': 'ips'}) - self.assertDictEqual( - list_input_single.args['ips'].value, + self.assertEqual( + list_input_single.args['ips'], { - 'sample1': sample1.args['ip'].value, - 'sample2': sample2.args['ip'].value, + 'sample1': sample1.args['ip'], + 'sample2': sample2.args['ip'], } ) # Test disconnect xs.disconnect(sample2, list_input_single) - self.assertDictEqual( - list_input_single.args['ips'].value, + self.assertEqual( + list_input_single.args['ips'], { - 'sample1': sample1.args['ip'].value, + 'sample1': sample1.args['ip'], } ) @@ -139,32 +139,32 @@ input-types: ) xs.connect(sample1, list_input_multi, mapping={'ip': 'ips', 'port': 'ports'}) - self.assertDictEqual( - list_input_multi.args['ips'].value, + self.assertEqual( + list_input_multi.args['ips'], { - 'sample1': sample1.args['ip'].value, + 'sample1': sample1.args['ip'], } ) - self.assertDictEqual( - list_input_multi.args['ports'].value, + self.assertEqual( + list_input_multi.args['ports'], { - 'sample1': sample1.args['port'].value, + 'sample1': sample1.args['port'], } ) xs.connect(sample2, list_input_multi, mapping={'ip': 'ips', 'port': 'ports'}) - self.assertDictEqual( - list_input_multi.args['ips'].value, + self.assertEqual( + list_input_multi.args['ips'], { - 'sample1': sample1.args['ip'].value, - 'sample2': sample2.args['ip'].value, + 'sample1': sample1.args['ip'], + 'sample2': sample2.args['ip'], } ) - self.assertDictEqual( - list_input_multi.args['ports'].value, + self.assertEqual( + list_input_multi.args['ports'], { - 'sample1': sample1.args['port'].value, - 'sample2': sample2.args['port'].value, + 'sample1': sample1.args['port'], + 'sample2': sample2.args['port'], } ) From 3318f688dc42d185051ce9a8a3e5e90df5087e15 Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Wed, 22 Apr 2015 20:29:06 +0200 Subject: [PATCH 55/87] Connections abstraction added over CLIENTS --- x/deployment.py | 2 +- x/observer.py | 11 ++++++++ x/resource.py | 2 +- x/signals.py | 68 ++++++++++++++++++++++++------------------------- x/test/base.py | 2 +- 5 files changed, 48 insertions(+), 37 deletions(-) diff --git a/x/deployment.py b/x/deployment.py index 96bda097..b3034ec8 100644 --- a/x/deployment.py +++ b/x/deployment.py @@ -18,7 +18,7 @@ def deploy(filename): # Clean stuff first db.clear() - xs.clear() + xs.Connections.clear() shutil.rmtree(resource_save_path, ignore_errors=True) os.makedirs(resource_save_path) diff --git a/x/observer.py b/x/observer.py index 185eec16..e455d63a 100644 --- a/x/observer.py +++ b/x/observer.py @@ -1,3 +1,6 @@ 
+from x import signals + + class BaseObserver(object): type_ = None @@ -58,6 +61,14 @@ class BaseObserver(object): return self.receivers.append(receiver) receiver.subscribed(self) + + signals.Connections.add( + self.attached_to, + self.name, + receiver.attached_to, + receiver.name + ) + receiver.notify(self) def subscribed(self, emitter): diff --git a/x/resource.py b/x/resource.py index 6b3c16f2..4574e55f 100644 --- a/x/resource.py +++ b/x/resource.py @@ -156,6 +156,6 @@ def load_all(dest_path): resource = load(resource_path) ret[resource.name] = resource - signals.reconnect_all() + signals.Connections.reconnect_all() return ret diff --git a/x/signals.py b/x/signals.py index 70ac721d..82cdbc82 100644 --- a/x/signals.py +++ b/x/signals.py @@ -13,14 +13,41 @@ CLIENTS_CONFIG_KEY = 'clients-data-file' CLIENTS = utils.read_config_file(CLIENTS_CONFIG_KEY) -def clear(): - global CLIENTS +class Connections(object): + @staticmethod + def add(emitter, src, receiver, dst): + if src not in emitter.args: + return - CLIENTS = {} + CLIENTS.setdefault(emitter.name, {}) + CLIENTS[emitter.name].setdefault(src, []) + CLIENTS[emitter.name][src].append((receiver.name, dst)) - path = utils.read_config()[CLIENTS_CONFIG_KEY] - if os.path.exists(path): - os.remove(path) + utils.save_to_config_file(CLIENTS_CONFIG_KEY, CLIENTS) + + @staticmethod + def reconnect_all(): + """Reconstruct connections for resource inputs from CLIENTS. + + :return: + """ + for emitter_name, dest_dict in CLIENTS.items(): + emitter = db.get_resource(emitter_name) + for emitter_input, destinations in dest_dict.items(): + for receiver_name, receiver_input in destinations: + receiver = db.get_resource(receiver_name) + receiver.args[receiver_input].subscribe( + emitter.args[emitter_input]) + + @staticmethod + def clear(): + global CLIENTS + + CLIENTS = {} + + path = utils.read_config()[CLIENTS_CONFIG_KEY] + if os.path.exists(path): + os.remove(path) def guess_mapping(emitter, receiver): @@ -59,38 +86,11 @@ def connect(emitter, receiver, mapping=None): if receiver.args[dst].type_ != 'list': disconnect_receiver_by_input(receiver, dst) - connect_src_dst(emitter, src, receiver, dst) + emitter.args[src].subscribe(receiver.args[dst]) receiver.save() -def connect_src_dst(emitter, src, receiver, dst): - if src not in emitter.args: - return - - CLIENTS.setdefault(emitter.name, {}) - CLIENTS[emitter.name].setdefault(src, []) - CLIENTS[emitter.name][src].append((receiver.name, dst)) - - emitter.args[src].subscribe(receiver.args[dst]) - - utils.save_to_config_file(CLIENTS_CONFIG_KEY, CLIENTS) - - -def reconnect_all(): - """Reconstruct connections for resource inputs from CLIENTS. 
- - :return: - """ - for emitter_name, dest_dict in CLIENTS.items(): - emitter = db.get_resource(emitter_name) - for emitter_input, destinations in dest_dict.items(): - for receiver_name, receiver_input in destinations: - receiver = db.get_resource(receiver_name) - receiver.args[receiver_input].subscribe( - emitter.args[emitter_input]) - - def disconnect(emitter, receiver): for src, destinations in CLIENTS[emitter.name].items(): disconnect_by_src(emitter, src, receiver) diff --git a/x/test/base.py b/x/test/base.py index 6dbceb97..f58e727b 100644 --- a/x/test/base.py +++ b/x/test/base.py @@ -16,7 +16,7 @@ class BaseResourceTest(unittest.TestCase): def tearDown(self): shutil.rmtree(self.storage_dir) db.clear() - xs.clear() + xs.Connections.clear() def make_resource_meta(self, meta_yaml): meta = yaml.load(meta_yaml) From 09209b118b466f896042fcfabf676aee12b38237 Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Thu, 23 Apr 2015 11:01:04 +0200 Subject: [PATCH 56/87] ListObserver refactored to really represent list, not some dict - self.connections keeps a synced list of connected components - TODO: we can expose self.connected somehow so that there wouldn't be a need to connect by 'name' additionally --- haproxy_deployment/haproxy-deployment.yaml | 25 ++-- haproxy_deployment/haproxy_deployment.py | 62 +++++----- x/handlers/ansible.py | 2 +- x/handlers/base.py | 7 +- x/observer.py | 21 +++- x/resource.py | 2 +- x/resources/docker_container/actions/run.yml | 7 +- x/resources/haproxy/actions/run.yml | 17 +-- x/resources/haproxy/meta.yaml | 4 +- x/resources/haproxy_config/meta.yaml | 1 + x/test/test_signals.py | 123 ++++++++++++++----- 11 files changed, 178 insertions(+), 93 deletions(-) diff --git a/haproxy_deployment/haproxy-deployment.yaml b/haproxy_deployment/haproxy-deployment.yaml index 78b57a10..91982932 100755 --- a/haproxy_deployment/haproxy-deployment.yaml +++ b/haproxy_deployment/haproxy-deployment.yaml @@ -71,9 +71,10 @@ resources: - name: haproxy_keystone_config model: x/resources/haproxy_config/ args: - servers: {} + name: keystone + servers: [] listen_port: 5000 - ports: {} + ports: [] ssh_user: ssh_key: @@ -112,9 +113,10 @@ resources: - name: haproxy_nova_config model: x/resources/haproxy_config/ args: - servers: {} + name: nova + servers: [] listen_port: 8774 - ports: {} + ports: [] ssh_user: ssh_key: @@ -122,9 +124,10 @@ resources: model: x/resources/haproxy/ args: ip: - listen_ports: {} - configs: {} - configs_ports: {} + listen_ports: [] + configs: [] + configs_names: [] + configs_ports: [] ssh_user: ssh_key: - name: haproxy @@ -132,11 +135,11 @@ resources: args: ip: image: tutum/haproxy - ports: {} + ports: [] ssh_user: ssh_key: - host_binds: {} - volume_binds: {} + host_binds: [] + volume_binds: [] connections: @@ -186,12 +189,14 @@ connections: receiver: haproxy-config mapping: listen_port: listen_ports + name: configs_names ports: configs_ports servers: configs - emitter: haproxy_nova_config receiver: haproxy-config mapping: listen_port: listen_ports + name: configs_names ports: configs_ports servers: configs diff --git a/haproxy_deployment/haproxy_deployment.py b/haproxy_deployment/haproxy_deployment.py index 99e547a7..5f921283 100644 --- a/haproxy_deployment/haproxy_deployment.py +++ b/haproxy_deployment/haproxy_deployment.py @@ -20,17 +20,17 @@ class TestHAProxyDeployment(unittest.TestCase): self.assertEqual( haproxy_keystone_config.args['servers'], - { - 'keystone1': keystone1.args['ip'], - 'keystone2': keystone2.args['ip'], - } + [ + keystone1.args['ip'], + 
keystone2.args['ip'], + ] ) self.assertEqual( haproxy_keystone_config.args['ports'], - { - 'keystone1': keystone1.args['port'], - 'keystone2': keystone2.args['port'], - } + [ + keystone1.args['port'], + keystone2.args['port'], + ] ) def test_nova_config(self): @@ -49,17 +49,17 @@ class TestHAProxyDeployment(unittest.TestCase): self.assertEqual( haproxy_nova_config.args['servers'], - { - 'nova1': nova1.args['ip'], - 'nova2': nova2.args['ip'], - } + [ + nova1.args['ip'], + nova2.args['ip'], + ] ) self.assertEqual( haproxy_nova_config.args['ports'], - { - 'nova1': nova1.args['port'], - 'nova2': nova2.args['port'], - } + [ + nova1.args['port'], + nova2.args['port'], + ] ) def test_haproxy(self): @@ -74,29 +74,29 @@ class TestHAProxyDeployment(unittest.TestCase): self.assertEqual(node5.args['ssh_user'], haproxy.args['ssh_user']) self.assertEqual( haproxy_config.args['configs'], - { - 'haproxy_keystone_config': haproxy_keystone_config.args['servers'], - 'haproxy_nova_config': haproxy_nova_config.args['servers'], - } + [ + haproxy_keystone_config.args['servers'], + haproxy_nova_config.args['servers'], + ] ) self.assertEqual( haproxy_config.args['configs_ports'], - { - 'haproxy_keystone_config': haproxy_keystone_config.args['ports'], - 'haproxy_nova_config': haproxy_nova_config.args['ports'], - } + [ + haproxy_keystone_config.args['ports'], + haproxy_nova_config.args['ports'], + ] ) self.assertEqual( haproxy_config.args['listen_ports'], - { - 'haproxy_keystone_config': haproxy_keystone_config.args['listen_port'], - 'haproxy_nova_config': haproxy_nova_config.args['listen_port'], - } + [ + haproxy_keystone_config.args['listen_port'], + haproxy_nova_config.args['listen_port'], + ] ) self.assertEqual( - { - 'haproxy-config': haproxy_config.args['config_dir'], - }, + [ + haproxy_config.args['config_dir'], + ], haproxy.args['host_binds'] ) self.assertEqual( diff --git a/x/handlers/ansible.py b/x/handlers/ansible.py index ff9640ef..2a5a1b8e 100644 --- a/x/handlers/ansible.py +++ b/x/handlers/ansible.py @@ -23,7 +23,7 @@ class Ansible(BaseHandler): def _create_inventory(self, r): inventory = '{0} ansible_ssh_host={1} ansible_connection=ssh ansible_ssh_user={2} ansible_ssh_private_key_file={3}' - host, user, ssh_key = r.args['ip'], r.args['ssh_user'], r.args['ssh_key'] + host, user, ssh_key = r.args['ip'].value, r.args['ssh_user'].value, r.args['ssh_key'].value print host print user print ssh_key diff --git a/x/handlers/base.py b/x/handlers/base.py index 6995f090..c2ecf492 100644 --- a/x/handlers/base.py +++ b/x/handlers/base.py @@ -33,16 +33,17 @@ class BaseHandler(object): return dest_file def _compile_file(self, template, dest_file, args): + print 'Rendering', template, args with open(template) as f: tpl = Template(f.read()) - tpl = tpl.render(args) + tpl = tpl.render(args, zip=zip) with open(dest_file, 'w') as g: g.write(tpl) def _make_args(self, resource): - args = {'name' : resource.name} - args.update(resource.args) + args = {'name': resource.name} + args.update(resource.args_dict()) return args diff --git a/x/observer.py b/x/observer.py index e455d63a..40c60546 100644 --- a/x/observer.py +++ b/x/observer.py @@ -122,21 +122,38 @@ class Observer(BaseObserver): class ListObserver(BaseObserver): type_ = 'list' + def __init__(self, *args, **kwargs): + super(ListObserver, self).__init__(*args, **kwargs) + self.connected = [] + def notify(self, emitter): self.log('Notify from {} value {}'.format(emitter, emitter.value)) # Copy emitter's values to receiver - self.value[emitter.attached_to.name] = 
emitter.value + #self.value[emitter.attached_to.name] = emitter.value + idx = self._connected_idx(emitter) + self.value[idx] = emitter.value for receiver in self.receivers: receiver.notify(self) self.attached_to.save() + def subscribed(self, emitter): + super(ListObserver, self).subscribed(emitter) + self.connected.append((emitter.attached_to.name, emitter.name)) + self.value.append(emitter.value) + def unsubscribed(self, emitter): """ :param receiver: Observer :return: """ self.log('Unsubscribed emitter {}'.format(emitter)) - self.value.pop(emitter.attached_to.name) + #self.value.pop(emitter.attached_to.name) + idx = self._connected_idx(emitter) + self.connected.pop(idx) + self.value.pop(idx) + + def _connected_idx(self, emitter): + return self.connected.index((emitter.attached_to.name, emitter.name)) def create(type_, *args, **kwargs): diff --git a/x/resource.py b/x/resource.py index 4574e55f..b642b86d 100644 --- a/x/resource.py +++ b/x/resource.py @@ -94,7 +94,7 @@ class Resource(object): metadata = copy.deepcopy(self.metadata) metadata['tags'] = self.tags - metadata['args'] = self.args_dict() + metadata['input'] = self.args_dict() meta_file = os.path.join(self.base_dir, 'meta.yaml') with open(meta_file, 'w') as f: diff --git a/x/resources/docker_container/actions/run.yml b/x/resources/docker_container/actions/run.yml index f8ceab11..f811e999 100644 --- a/x/resources/docker_container/actions/run.yml +++ b/x/resources/docker_container/actions/run.yml @@ -10,8 +10,7 @@ image: {{ image }} state: running ports: - {% for name, port in ports.items() %} - # {{ name }} + {% for port in ports %} - {{ port }}:{{ port }} {% endfor %} volumes: @@ -19,6 +18,6 @@ # Currently it's not that trivial to pass custom src: dst here # (when a config variable is passed here from other resource) # so we mount it to the same directory as on host - {% for emitter, bind in host_binds.items() %} - - {{ bind }}:{{ bind }} + {% for bind in host_binds %} + - {{ bind['src'] }}:{{ bind['dst'] }}:{{ bind.get('mode', 'ro') }} {% endfor %} diff --git a/x/resources/haproxy/actions/run.yml b/x/resources/haproxy/actions/run.yml index 7c4a9b35..611a3ed8 100644 --- a/x/resources/haproxy/actions/run.yml +++ b/x/resources/haproxy/actions/run.yml @@ -2,22 +2,23 @@ - hosts: [{{ ip }}] sudo: yes vars: - config_dir: {{ config_dir }} + config_dir: {src: {{ config_dir['src'] }}, dst: {{ config_dir['dst'] }}} haproxy_ip: {{ ip }} haproxy_services: - {% for service, servers in configs.items() %} + {% for service, servers, ports, port in zip(configs_names, configs, configs_ports, listen_ports) %} - name: {{ service }} - listen_port: {{ listen_ports[service] }} + listen_port: {{ port }} servers: - {% for name, ip in servers.items() %} + {% for server_ip, server_port in zip(servers, ports) %} - name: {{ name }} - ip: {{ ip }} - port: {{ configs_ports[service][name] }} + ip: {{ server_ip }} + port: {{ server_port }} {% endfor %} {% endfor %} tasks: - apt: name=python-pip state=present - shell: pip install docker-py - service: name=docker state=started - - file: path=/etc/haproxy/haproxy.cfg state=touch - - template: src=/vagrant/haproxy.cfg dest=/etc/haproxy/haproxy.cfg + - file: path={{ config_dir['src'] }}/ state=directory + - file: path={{ config_dir['src'] }}/haproxy.cfg state=touch + - template: src=/vagrant/haproxy.cfg dest={{ config_dir['src'] }}/haproxy.cfg diff --git a/x/resources/haproxy/meta.yaml b/x/resources/haproxy/meta.yaml index e919f9af..57f53a69 100644 --- a/x/resources/haproxy/meta.yaml +++ b/x/resources/haproxy/meta.yaml 
@@ -3,13 +3,15 @@ handler: ansible version: 1.0.0 input: ip: - config_dir: /etc/haproxy + config_dir: {src: /etc/solar/haproxy, dst: /etc/haproxy} listen_ports: configs: + configs_names: configs_ports: ssh_user: ssh_key: input-types: listen_ports: list configs: list + configs_names: list configs_ports: list diff --git a/x/resources/haproxy_config/meta.yaml b/x/resources/haproxy_config/meta.yaml index 3ad9355f..fe685683 100644 --- a/x/resources/haproxy_config/meta.yaml +++ b/x/resources/haproxy_config/meta.yaml @@ -2,6 +2,7 @@ id: haproxy_config handler: ansible version: 1.0.0 input: + name: listen_port: ports: servers: diff --git a/x/test/test_signals.py b/x/test/test_signals.py index ea490eba..a6a30cac 100644 --- a/x/test/test_signals.py +++ b/x/test/test_signals.py @@ -51,6 +51,44 @@ input: {'a': 2} ) + def test_multiple_resource_disjoint_connect(self): + sample_meta_dir = self.make_resource_meta(""" +id: sample +handler: ansible +version: 1.0.0 +input: + ip: + port: + """) + sample_ip_meta_dir = self.make_resource_meta(""" +id: sample-ip +handler: ansible +version: 1.0.0 +input: + ip: + """) + sample_port_meta_dir = self.make_resource_meta(""" +id: sample-port +handler: ansible +version: 1.0.0 +input: + port: + """) + + sample = self.create_resource( + 'sample', sample_meta_dir, {'ip': None, 'port': None} + ) + sample_ip = self.create_resource( + 'sample-ip', sample_ip_meta_dir, {'ip': '10.0.0.1'} + ) + sample_port = self.create_resource( + 'sample-port', sample_port_meta_dir, {'port': '8000'} + ) + xs.connect(sample_ip, sample) + xs.connect(sample_port, sample) + self.assertEqual(sample.args['ip'], sample_ip.args['ip']) + self.assertEqual(sample.args['port'], sample_port.args['port']) + class TestListInput(base.BaseResourceTest): def test_list_input_single(self): @@ -78,33 +116,33 @@ input-types: 'sample2', sample_meta_dir, {'ip': '10.0.0.2'} ) list_input_single = self.create_resource( - 'list-input-single', list_input_single_meta_dir, {'ips': {}} + 'list-input-single', list_input_single_meta_dir, {'ips': []} ) xs.connect(sample1, list_input_single, mapping={'ip': 'ips'}) self.assertEqual( list_input_single.args['ips'], - { - 'sample1': sample1.args['ip'], - } + [ + sample1.args['ip'], + ] ) xs.connect(sample2, list_input_single, mapping={'ip': 'ips'}) self.assertEqual( list_input_single.args['ips'], - { - 'sample1': sample1.args['ip'], - 'sample2': sample2.args['ip'], - } + [ + sample1.args['ip'], + sample2.args['ip'], + ] ) # Test disconnect xs.disconnect(sample2, list_input_single) self.assertEqual( list_input_single.args['ips'], - { - 'sample1': sample1.args['ip'], - } + [ + sample1.args['ip'], + ] ) def test_list_input_multi(self): @@ -135,37 +173,58 @@ input-types: 'sample2', sample_meta_dir, {'ip': '10.0.0.2', 'port': '1001'} ) list_input_multi = self.create_resource( - 'list-input-multi', list_input_multi_meta_dir, {'ips': {}, 'ports': {}} + 'list-input-multi', list_input_multi_meta_dir, {'ips': [], 'ports': []} ) xs.connect(sample1, list_input_multi, mapping={'ip': 'ips', 'port': 'ports'}) - self.assertEqual( - list_input_multi.args['ips'], - { - 'sample1': sample1.args['ip'], - } - ) - self.assertEqual( - list_input_multi.args['ports'], - { - 'sample1': sample1.args['port'], - } - ) + self.assertEqual(list_input_multi.args['ips'], [sample1.args['ip']]) + self.assertEqual(list_input_multi.args['ports'], [sample1.args['port']]) xs.connect(sample2, list_input_multi, mapping={'ip': 'ips', 'port': 'ports'}) self.assertEqual( list_input_multi.args['ips'], - { - 'sample1': 
sample1.args['ip'], - 'sample2': sample2.args['ip'], - } + [ + sample1.args['ip'], + sample2.args['ip'], + ] ) self.assertEqual( list_input_multi.args['ports'], - { - 'sample1': sample1.args['port'], - 'sample2': sample2.args['port'], - } + [ + sample1.args['port'], + sample2.args['port'], + ] + ) + + +class TestMultiInput(base.BaseResourceTest): + def test_multi_input(self): + sample_meta_dir = self.make_resource_meta(""" +id: sample +handler: ansible +version: 1.0.0 +input: + ip: + port: + """) + receiver_meta_dir = self.make_resource_meta(""" +id: receiver +handler: ansible +version: 1.0.0 +input: + server: + """) + + sample = self.create_resource( + 'sample', sample_meta_dir, {'ip': '10.0.0.1', 'port': '5000'} + ) + receiver = self.create_resource( + 'receiver', receiver_meta_dir, {'server': None} + ) + xs.connect(sample, receiver, mapping={'ip, port': 'server'}) + self.assertItemsEqual( + (sample.args['ip'], sample.args['port']), + receiver.args['server'], ) From 87068e67aea39e6f1710655c73e2214d5650bebc Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Thu, 23 Apr 2015 11:16:25 +0200 Subject: [PATCH 57/87] Unsubscribe for simple observer when reconnecting to another observer --- x/observer.py | 37 +++++++++++++++++++++++++------------ x/signals.py | 9 +++++++++ x/test/test_signals.py | 31 +++++++++++++++++++++++++++++++ 3 files changed, 65 insertions(+), 12 deletions(-) diff --git a/x/observer.py b/x/observer.py index 40c60546..b2402d6c 100644 --- a/x/observer.py +++ b/x/observer.py @@ -83,6 +83,14 @@ class BaseObserver(object): if self.find_receiver(receiver): self.receivers.remove(receiver) receiver.unsubscribed(self) + + signals.Connections.remove( + self.attached_to, + self.name, + receiver.attached_to, + receiver.name + ) + # TODO: ? 
#receiver.notify(self) @@ -95,8 +103,6 @@ class Observer(BaseObserver): def __init__(self, *args, **kwargs): super(Observer, self).__init__(*args, **kwargs) - # TODO: - # Simple observer can be attached to at most one emitter self.emitter = None def notify(self, emitter): @@ -114,9 +120,16 @@ class Observer(BaseObserver): receiver.notify(self) self.attached_to.save() - def subscribe(self, receiver): - # TODO: - super(Observer, self).subscribe(receiver) + def subscribed(self, emitter): + super(Observer, self).subscribed(emitter) + # Simple observer can be attached to at most one emitter + if self.emitter is not None: + self.emitter.unsubscribe(self) + self.emitter = emitter + + def unsubscribed(self, emitter): + super(Observer, self).unsubscribed(emitter) + self.emitter = None class ListObserver(BaseObserver): @@ -124,13 +137,13 @@ class ListObserver(BaseObserver): def __init__(self, *args, **kwargs): super(ListObserver, self).__init__(*args, **kwargs) - self.connected = [] + self.emitters = [] def notify(self, emitter): self.log('Notify from {} value {}'.format(emitter, emitter.value)) # Copy emitter's values to receiver #self.value[emitter.attached_to.name] = emitter.value - idx = self._connected_idx(emitter) + idx = self._emitter_idx(emitter) self.value[idx] = emitter.value for receiver in self.receivers: receiver.notify(self) @@ -138,7 +151,7 @@ class ListObserver(BaseObserver): def subscribed(self, emitter): super(ListObserver, self).subscribed(emitter) - self.connected.append((emitter.attached_to.name, emitter.name)) + self.emitters.append((emitter.attached_to.name, emitter.name)) self.value.append(emitter.value) def unsubscribed(self, emitter): @@ -148,12 +161,12 @@ class ListObserver(BaseObserver): """ self.log('Unsubscribed emitter {}'.format(emitter)) #self.value.pop(emitter.attached_to.name) - idx = self._connected_idx(emitter) - self.connected.pop(idx) + idx = self._emitter_idx(emitter) + self.emitters.pop(idx) self.value.pop(idx) - def _connected_idx(self, emitter): - return self.connected.index((emitter.attached_to.name, emitter.name)) + def _emitter_idx(self, emitter): + return self.emitters.index((emitter.attached_to.name, emitter.name)) def create(type_, *args, **kwargs): diff --git a/x/signals.py b/x/signals.py index 82cdbc82..78478d84 100644 --- a/x/signals.py +++ b/x/signals.py @@ -25,6 +25,15 @@ class Connections(object): utils.save_to_config_file(CLIENTS_CONFIG_KEY, CLIENTS) + @staticmethod + def remove(emitter, src, receiver, dst): + CLIENTS[emitter.name][src] = [ + destination for destination in CLIENTS[emitter.name][src] + if destination != (receiver.name, dst) + ] + + utils.save_to_config_file(CLIENTS_CONFIG_KEY, CLIENTS) + @staticmethod def reconnect_all(): """Reconstruct connections for resource inputs from CLIENTS. 
diff --git a/x/test/test_signals.py b/x/test/test_signals.py index a6a30cac..facb013c 100644 --- a/x/test/test_signals.py +++ b/x/test/test_signals.py @@ -89,6 +89,37 @@ input: self.assertEqual(sample.args['ip'], sample_ip.args['ip']) self.assertEqual(sample.args['port'], sample_port.args['port']) + def test_simple_observer_unsubscription(self): + sample_meta_dir = self.make_resource_meta(""" +id: sample +handler: ansible +version: 1.0.0 +input: + ip: + """) + + sample = self.create_resource( + 'sample', sample_meta_dir, {'ip': None} + ) + sample1 = self.create_resource( + 'sample1', sample_meta_dir, {'ip': '10.0.0.1'} + ) + sample2 = self.create_resource( + 'sample2', sample_meta_dir, {'ip': '10.0.0.2'} + ) + + xs.connect(sample1, sample) + self.assertEqual(sample1.args['ip'], sample.args['ip']) + self.assertEqual(len(sample1.args['ip'].receivers), 1) + + xs.connect(sample2, sample) + self.assertEqual(sample2.args['ip'], sample.args['ip']) + # sample should be unsubscribed from sample1 and subscribed to sample2 + self.assertEqual(len(sample1.args['ip'].receivers), 0) + + sample1.update({'ip': '10.0.0.3'}) + self.assertEqual(sample2.args['ip'], sample.args['ip']) + class TestListInput(base.BaseResourceTest): def test_list_input_single(self): From f9686f7c9872e5afc5a42e118018c00412f71b78 Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Fri, 24 Apr 2015 12:39:05 +0200 Subject: [PATCH 58/87] Found, fixed and tested simple circular connection --- x/signals.py | 15 ++++++++++----- x/test/test_signals.py | 21 +++++++++++++++++++++ 2 files changed, 31 insertions(+), 5 deletions(-) diff --git a/x/signals.py b/x/signals.py index 78478d84..111afd68 100644 --- a/x/signals.py +++ b/x/signals.py @@ -19,9 +19,14 @@ class Connections(object): if src not in emitter.args: return + # TODO: implement general circular detection, this one is simple + if [emitter.name, src] in CLIENTS.get(receiver.name, {}).get(dst, []): + raise Exception('Attempted to create cycle in dependencies. 
Not nice.') + CLIENTS.setdefault(emitter.name, {}) CLIENTS[emitter.name].setdefault(src, []) - CLIENTS[emitter.name][src].append((receiver.name, dst)) + if [receiver.name, dst] not in CLIENTS[emitter.name][src]: + CLIENTS[emitter.name][src].append([receiver.name, dst]) utils.save_to_config_file(CLIENTS_CONFIG_KEY, CLIENTS) @@ -29,7 +34,7 @@ class Connections(object): def remove(emitter, src, receiver, dst): CLIENTS[emitter.name][src] = [ destination for destination in CLIENTS[emitter.name][src] - if destination != (receiver.name, dst) + if destination != [receiver.name, dst] ] utils.save_to_config_file(CLIENTS_CONFIG_KEY, CLIENTS) @@ -45,8 +50,8 @@ class Connections(object): for emitter_input, destinations in dest_dict.items(): for receiver_name, receiver_input in destinations: receiver = db.get_resource(receiver_name) - receiver.args[receiver_input].subscribe( - emitter.args[emitter_input]) + emitter.args[emitter_input].subscribe( + receiver.args[receiver_input]) @staticmethod def clear(): @@ -151,7 +156,7 @@ def assign_connections(receiver, connections): mappings = defaultdict(list) for key, dest in connections.iteritems(): resource, r_key = dest.split('.') - mappings[resource].append((r_key, key)) + mappings[resource].append([r_key, key]) for resource, r_mappings in mappings.iteritems(): connect(resource, receiver, r_mappings) diff --git a/x/test/test_signals.py b/x/test/test_signals.py index facb013c..ecfd099a 100644 --- a/x/test/test_signals.py +++ b/x/test/test_signals.py @@ -120,6 +120,27 @@ input: sample1.update({'ip': '10.0.0.3'}) self.assertEqual(sample2.args['ip'], sample.args['ip']) + def test_circular_connection_prevention(self): + # TODO: more complex cases + sample_meta_dir = self.make_resource_meta(""" +id: sample +handler: ansible +version: 1.0.0 +input: + ip: + """) + + sample1 = self.create_resource( + 'sample1', sample_meta_dir, {'ip': '10.0.0.1'} + ) + sample2 = self.create_resource( + 'sample2', sample_meta_dir, {'ip': '10.0.0.2'} + ) + xs.connect(sample1, sample2) + + with self.assertRaises(Exception): + xs.connect(sample2, sample1) + class TestListInput(base.BaseResourceTest): def test_list_input_single(self): From e2bb4284ae07c82897d117f4e69b56fc1dbf0ad6 Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Mon, 27 Apr 2015 11:37:05 +0200 Subject: [PATCH 59/87] ListObserver fixes ListObserver needs to save emitter data to args. This is because it saves values, when we re-do connections we need to do them in the same order as for values. This can be only reliably handled when we save emitter along with values. 
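
For reference, a minimal sketch (not part of the patch itself) of the entry format ListObserver stores per connected emitter after this change, based on the _format_value and _emitter_idx helpers in the diff below; the resource names and IP values here are made up for illustration:

    # Illustrative only: shape of ListObserver.value entries after this patch.
    # Each connected emitter contributes one dict, so both the value and the
    # emitter's identity survive a save/reconnect cycle in the original order.
    value_list = [
        {'emitter': 'ip', 'emitter_attached_to': 'keystone1', 'value': '10.0.0.1'},
        {'emitter': 'ip', 'emitter_attached_to': 'keystone2', 'value': '10.0.0.2'},
    ]
    # _emitter_idx-style lookup: find an entry by the resource it came from
    idx = [i for i, e in enumerate(value_list)
           if e['emitter_attached_to'] == 'keystone2'][0]
    assert value_list[idx]['value'] == '10.0.0.2'
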
--- haproxy_deployment/haproxy_deployment.py | 16 ++--- x/handlers/base.py | 2 +- x/observer.py | 34 ++++++--- x/resource.py | 16 ++++- x/resources/docker_container/actions/run.yml | 8 +-- x/resources/haproxy/actions/run.yml | 22 +++--- x/test/test_signals.py | 74 ++++++++++++++++++-- 7 files changed, 130 insertions(+), 42 deletions(-) diff --git a/haproxy_deployment/haproxy_deployment.py b/haproxy_deployment/haproxy_deployment.py index 5f921283..120d19d5 100644 --- a/haproxy_deployment/haproxy_deployment.py +++ b/haproxy_deployment/haproxy_deployment.py @@ -19,14 +19,14 @@ class TestHAProxyDeployment(unittest.TestCase): haproxy_keystone_config = db.get_resource('haproxy_keystone_config') self.assertEqual( - haproxy_keystone_config.args['servers'], + [ip['value'] for ip in haproxy_keystone_config.args['servers'].value], [ keystone1.args['ip'], keystone2.args['ip'], ] ) self.assertEqual( - haproxy_keystone_config.args['ports'], + [p['value'] for p in haproxy_keystone_config.args['ports'].value], [ keystone1.args['port'], keystone2.args['port'], @@ -48,14 +48,14 @@ class TestHAProxyDeployment(unittest.TestCase): haproxy_nova_config = db.get_resource('haproxy_nova_config') self.assertEqual( - haproxy_nova_config.args['servers'], + [ip['value'] for ip in haproxy_nova_config.args['servers'].value], [ nova1.args['ip'], nova2.args['ip'], ] ) self.assertEqual( - haproxy_nova_config.args['ports'], + [p['value'] for p in haproxy_nova_config.args['ports'].value], [ nova1.args['port'], nova2.args['port'], @@ -73,21 +73,21 @@ class TestHAProxyDeployment(unittest.TestCase): self.assertEqual(node5.args['ssh_key'], haproxy.args['ssh_key']) self.assertEqual(node5.args['ssh_user'], haproxy.args['ssh_user']) self.assertEqual( - haproxy_config.args['configs'], + [c['value'] for c in haproxy_config.args['configs'].value], [ haproxy_keystone_config.args['servers'], haproxy_nova_config.args['servers'], ] ) self.assertEqual( - haproxy_config.args['configs_ports'], + [cp['value'] for cp in haproxy_config.args['configs_ports'].value], [ haproxy_keystone_config.args['ports'], haproxy_nova_config.args['ports'], ] ) self.assertEqual( - haproxy_config.args['listen_ports'], + [lp['value'] for lp in haproxy_config.args['listen_ports'].value], [ haproxy_keystone_config.args['listen_port'], haproxy_nova_config.args['listen_port'], @@ -97,7 +97,7 @@ class TestHAProxyDeployment(unittest.TestCase): [ haproxy_config.args['config_dir'], ], - haproxy.args['host_binds'] + [hb['value'] for hb in haproxy.args['host_binds'].value] ) self.assertEqual( haproxy.args['ports'], diff --git a/x/handlers/base.py b/x/handlers/base.py index c2ecf492..457d8b6c 100644 --- a/x/handlers/base.py +++ b/x/handlers/base.py @@ -43,7 +43,7 @@ class BaseHandler(object): def _make_args(self, resource): args = {'name': resource.name} - args.update(resource.args_dict()) + args.update(resource.args) return args diff --git a/x/observer.py b/x/observer.py index b2402d6c..7f0d7f0f 100644 --- a/x/observer.py +++ b/x/observer.py @@ -20,7 +20,10 @@ class BaseObserver(object): print '{} {}'.format(self, msg) def __repr__(self): - return '[{}:{}]'.format(self.attached_to.name, self.name) + return '[{}:{}] {}'.format(self.attached_to.name, self.name, self.value) + + def __unicode__(self): + return self.value def __eq__(self, other): if isinstance(other, BaseObserver): @@ -135,24 +138,32 @@ class Observer(BaseObserver): class ListObserver(BaseObserver): type_ = 'list' - def __init__(self, *args, **kwargs): - super(ListObserver, self).__init__(*args, **kwargs) - 
self.emitters = [] + def __unicode__(self): + return unicode(self.value) + + @staticmethod + def _format_value(emitter): + return { + 'emitter': emitter.name, + 'emitter_attached_to': emitter.attached_to.name, + 'value': emitter.value, + } def notify(self, emitter): self.log('Notify from {} value {}'.format(emitter, emitter.value)) # Copy emitter's values to receiver #self.value[emitter.attached_to.name] = emitter.value idx = self._emitter_idx(emitter) - self.value[idx] = emitter.value + self.value[idx] = self._format_value(emitter) for receiver in self.receivers: receiver.notify(self) self.attached_to.save() def subscribed(self, emitter): super(ListObserver, self).subscribed(emitter) - self.emitters.append((emitter.attached_to.name, emitter.name)) - self.value.append(emitter.value) + idx = self._emitter_idx(emitter) + if idx is None: + self.value.append(self._format_value(emitter)) def unsubscribed(self, emitter): """ @@ -160,13 +171,16 @@ class ListObserver(BaseObserver): :return: """ self.log('Unsubscribed emitter {}'.format(emitter)) - #self.value.pop(emitter.attached_to.name) idx = self._emitter_idx(emitter) - self.emitters.pop(idx) self.value.pop(idx) def _emitter_idx(self, emitter): - return self.emitters.index((emitter.attached_to.name, emitter.name)) + try: + return [i for i, e in enumerate(self.value) + if e['emitter_attached_to'] == emitter.attached_to.name + ][0] + except IndexError: + return def create(type_, *args, **kwargs): diff --git a/x/resource.py b/x/resource.py index b642b86d..d46e9b16 100644 --- a/x/resource.py +++ b/x/resource.py @@ -34,10 +34,24 @@ class Resource(object): return ("Resource('name={0}', metadata={1}, args={2}, " "base_dir='{3}', tags={4})").format(self.name, json.dumps(self.metadata), - json.dumps(self.args_dict()), + json.dumps(self.args_show()), self.base_dir, self.tags) + def args_show(self): + def formatter(v): + if isinstance(v, observer.ListObserver): + return v.value + elif isinstance(v, observer.Observer): + return { + 'emitter': v.emitter.attached_to.name if v.emitter else None, + 'value': v.value, + } + + return v + + return {k: formatter(v) for k, v in self.args.items()} + def args_dict(self): return {k: v.value for k, v in self.args.items()} diff --git a/x/resources/docker_container/actions/run.yml b/x/resources/docker_container/actions/run.yml index f811e999..a04d1a30 100644 --- a/x/resources/docker_container/actions/run.yml +++ b/x/resources/docker_container/actions/run.yml @@ -10,14 +10,14 @@ image: {{ image }} state: running ports: - {% for port in ports %} - - {{ port }}:{{ port }} + {% for port in ports.value %} + - {{ port['value'] }}:{{ port['value'] }} {% endfor %} volumes: # TODO: host_binds might need more work # Currently it's not that trivial to pass custom src: dst here # (when a config variable is passed here from other resource) # so we mount it to the same directory as on host - {% for bind in host_binds %} - - {{ bind['src'] }}:{{ bind['dst'] }}:{{ bind.get('mode', 'ro') }} + {% for bind in host_binds.value %} + - {{ bind['value']['src'] }}:{{ bind['value']['dst'] }}:{{ bind['value'].get('mode', 'ro') }} {% endfor %} diff --git a/x/resources/haproxy/actions/run.yml b/x/resources/haproxy/actions/run.yml index 611a3ed8..4e71c532 100644 --- a/x/resources/haproxy/actions/run.yml +++ b/x/resources/haproxy/actions/run.yml @@ -2,23 +2,23 @@ - hosts: [{{ ip }}] sudo: yes vars: - config_dir: {src: {{ config_dir['src'] }}, dst: {{ config_dir['dst'] }}} + config_dir: {src: {{ config_dir.value['src'] }}, dst: {{ 
config_dir.value['dst'] }}}
     haproxy_ip: {{ ip }}
     haproxy_services:
-      {% for service, servers, ports, port in zip(configs_names, configs, configs_ports, listen_ports) %}
-      - name: {{ service }}
-        listen_port: {{ port }}
+      {% for service, ports, listen_port in zip(configs.value, configs_ports.value, listen_ports.value) %}
+      - name: {{ service['emitter_attached_to'] }}
+        listen_port: {{ listen_port['value'] }}
         servers:
-          {% for server_ip, server_port in zip(servers, ports) %}
-          - name: {{ name }}
-            ip: {{ server_ip }}
-            port: {{ server_port }}
+          {% for server_ip, server_port in zip(service['value'], ports['value']) %}
+          - name: {{ server_ip['emitter_attached_to'] }}
+            ip: {{ server_ip['value'] }}
+            port: {{ server_port['value'] }}
           {% endfor %}
       {% endfor %}
   tasks:
     - apt: name=python-pip state=present
     - shell: pip install docker-py
     - service: name=docker state=started
-    - file: path={{ config_dir['src'] }}/ state=directory
-    - file: path={{ config_dir['src'] }}/haproxy.cfg state=touch
-    - template: src=/vagrant/haproxy.cfg dest={{ config_dir['src'] }}/haproxy.cfg
+    - file: path={{ config_dir.value['src'] }}/ state=directory
+    - file: path={{ config_dir.value['src'] }}/haproxy.cfg state=touch
+    - template: src=/vagrant/haproxy.cfg dest={{ config_dir.value['src'] }}/haproxy.cfg
diff --git a/x/test/test_signals.py b/x/test/test_signals.py
index ecfd099a..6647fb5d 100644
--- a/x/test/test_signals.py
+++ b/x/test/test_signals.py
@@ -26,6 +26,10 @@ input:
             sample1.args['values'],
             sample2.args['values'],
         )
+        self.assertEqual(
+            sample2.args['values'].emitter,
+            sample1.args['values']
+        )
 
         # Check update
         sample1.update({'values': {'a': 2}})
@@ -50,6 +54,7 @@ input:
             sample2.args['values'],
             {'a': 2}
         )
+        self.assertEqual(sample2.args['values'].emitter, None)
 
     def test_multiple_resource_disjoint_connect(self):
         sample_meta_dir = self.make_resource_meta("""
@@ -88,6 +93,14 @@ input:
         xs.connect(sample_port, sample)
         self.assertEqual(sample.args['ip'], sample_ip.args['ip'])
         self.assertEqual(sample.args['port'], sample_port.args['port'])
+        self.assertEqual(
+            sample.args['ip'].emitter,
+            sample_ip.args['ip']
+        )
+        self.assertEqual(
+            sample.args['port'].emitter,
+            sample_port.args['port']
+        )
 
     def test_simple_observer_unsubscription(self):
         sample_meta_dir = self.make_resource_meta("""
@@ -111,11 +124,19 @@ input:
         xs.connect(sample1, sample)
         self.assertEqual(sample1.args['ip'], sample.args['ip'])
         self.assertEqual(len(sample1.args['ip'].receivers), 1)
+        self.assertEqual(
+            sample.args['ip'].emitter,
+            sample1.args['ip']
+        )
 
         xs.connect(sample2, sample)
         self.assertEqual(sample2.args['ip'], sample.args['ip'])
         # sample should be unsubscribed from sample1 and subscribed to sample2
         self.assertEqual(len(sample1.args['ip'].receivers), 0)
+        self.assertEqual(
+            sample.args['ip'].emitter,
+            sample2.args['ip']
+        )
 
         sample1.update({'ip': '10.0.0.3'})
         self.assertEqual(sample2.args['ip'], sample.args['ip'])
@@ -173,15 +194,34 @@ input-types:
         xs.connect(sample1, list_input_single, mapping={'ip': 'ips'})
         self.assertEqual(
-            list_input_single.args['ips'],
+            [ip['value'] for ip in list_input_single.args['ips'].value],
             [
                 sample1.args['ip'],
             ]
         )
+        self.assertListEqual(
+            [(e['emitter_attached_to'], e['emitter']) for e in list_input_single.args['ips'].value],
+            [(sample1.args['ip'].attached_to.name, 'ip')]
+        )
 
         xs.connect(sample2, list_input_single, mapping={'ip': 'ips'})
         self.assertEqual(
-            list_input_single.args['ips'],
+            [ip['value'] for ip in list_input_single.args['ips'].value],
+            [
+                sample1.args['ip'],
+                sample2.args['ip'],
+            ]
+        )
+        self.assertListEqual(
+            [(e['emitter_attached_to'], e['emitter']) for e in list_input_single.args['ips'].value],
+            [(sample1.args['ip'].attached_to.name, 'ip'),
+             (sample2.args['ip'].attached_to.name, 'ip')]
+        )
+
+        # Test update
+        sample2.update({'ip': '10.0.0.3'})
+        self.assertEqual(
+            [ip['value'] for ip in list_input_single.args['ips'].value],
             [
                 sample1.args['ip'],
                 sample2.args['ip'],
             ]
@@ -191,11 +231,15 @@ input-types:
         # Test disconnect
         xs.disconnect(sample2, list_input_single)
         self.assertEqual(
-            list_input_single.args['ips'],
+            [ip['value'] for ip in list_input_single.args['ips'].value],
             [
                 sample1.args['ip'],
             ]
         )
+        self.assertListEqual(
+            [(e['emitter_attached_to'], e['emitter']) for e in list_input_single.args['ips'].value],
+            [(sample1.args['ip'].attached_to.name, 'ip')]
+        )
 
     def test_list_input_multi(self):
         sample_meta_dir = self.make_resource_meta("""
@@ -229,24 +273,40 @@ input-types:
         )
 
         xs.connect(sample1, list_input_multi, mapping={'ip': 'ips', 'port': 'ports'})
-        self.assertEqual(list_input_multi.args['ips'], [sample1.args['ip']])
-        self.assertEqual(list_input_multi.args['ports'], [sample1.args['port']])
+        self.assertEqual(
+            [ip['value'] for ip in list_input_multi.args['ips'].value],
+            [sample1.args['ip']]
+        )
+        self.assertEqual(
+            [p['value'] for p in list_input_multi.args['ports'].value],
+            [sample1.args['port']]
+        )
 
         xs.connect(sample2, list_input_multi, mapping={'ip': 'ips', 'port': 'ports'})
         self.assertEqual(
-            list_input_multi.args['ips'],
+            [ip['value'] for ip in list_input_multi.args['ips'].value],
             [
                 sample1.args['ip'],
                 sample2.args['ip'],
             ]
         )
+        self.assertListEqual(
+            [(e['emitter_attached_to'], e['emitter']) for e in list_input_multi.args['ips'].value],
+            [(sample1.args['ip'].attached_to.name, 'ip'),
+             (sample2.args['ip'].attached_to.name, 'ip')]
+        )
         self.assertEqual(
-            list_input_multi.args['ports'],
+            [p['value'] for p in list_input_multi.args['ports'].value],
             [
                 sample1.args['port'],
                 sample2.args['port'],
             ]
         )
+        self.assertListEqual(
+            [(e['emitter_attached_to'], e['emitter']) for e in list_input_multi.args['ports'].value],
+            [(sample1.args['port'].attached_to.name, 'port'),
+             (sample2.args['port'].attached_to.name, 'port')]
+        )
 
 
 class TestMultiInput(base.BaseResourceTest):

From 83a4050305647353d191cc07c8fed3fd44e407cd Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?=
Date: Tue, 28 Apr 2015 09:50:07 +0000
Subject: [PATCH 60/87] Add resource path to args

---
 x/handlers/base.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/x/handlers/base.py b/x/handlers/base.py
index 6995f090..007d7f27 100644
--- a/x/handlers/base.py
+++ b/x/handlers/base.py
@@ -42,6 +42,7 @@ class BaseHandler(object):
 
     def _make_args(self, resource):
         args = {'name' : resource.name}
+        args['resource_dir'] = resource.base_dir
         args.update(resource.args)
         return args
 

From 1c9e116f4f9dc7dfe5f8b4afc3ec6a4b60a337d5 Mon Sep 17 00:00:00 2001
From: Przemyslaw Kaminski
Date: Tue, 28 Apr 2015 11:49:51 +0200
Subject: [PATCH 61/87] haproxy-deployment.yaml fixes according to new keystone inputs

---
 haproxy_deployment/haproxy-deployment.yaml | 10 ++++++++--
 x/resources/keystone/actions/run.yml | 2 +-
 2 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/haproxy_deployment/haproxy-deployment.yaml b/haproxy_deployment/haproxy-deployment.yaml
index 91982932..d0350fdb 100755
--- a/haproxy_deployment/haproxy-deployment.yaml
+++ b/haproxy_deployment/haproxy-deployment.yaml
@@ -57,15 +57,21 @@ resources:
   - name: keystone1
     model: x/resources/keystone/
     args:
-      ip:
+      admin_port: 35357
+      port: 5000
       image: TEST
+      config_dir: /etc/solar/keystone1
+      ip:
       ssh_user:
       ssh_key:
   - name: keystone2
     model: x/resources/keystone/
     args:
-      ip:
+      admin_port: 35357
+      port: 5000
+      config_dir: /etc/solar/keystone2
       image: TEST
+      ip:
       ssh_user:
       ssh_key:
   - name: haproxy_keystone_config
diff --git a/x/resources/keystone/actions/run.yml b/x/resources/keystone/actions/run.yml
index 7146e52d..7eb81109 100644
--- a/x/resources/keystone/actions/run.yml
+++ b/x/resources/keystone/actions/run.yml
@@ -9,5 +9,5 @@
     ports:
       - {{ port }}:5000
       - {{ admin_port }}:35357
-    volumnes:
+    volumes:
       - {{ config_dir }}:/etc/keystone

From 2ed77ee40a9cc5a404791aea945c2806b4d5d3db Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?=
Date: Tue, 28 Apr 2015 09:50:38 +0000
Subject: [PATCH 62/87] Stylistic changes

---
 x/resource.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/x/resource.py b/x/resource.py
index 895105d1..a27e68a4 100644
--- a/x/resource.py
+++ b/x/resource.py
@@ -27,7 +27,7 @@ class Resource(object):
         self.tags = tags or []
 
     def __repr__(self):
-        return ("Resource('name={0}', metadata={1}, args={2}, "
+        return ("Resource(name='{0}', metadata={1}, args={2}, "
                 "base_dir='{3}', tags={4})").format(self.name,
                                                     json.dumps(self.metadata),
                                                     json.dumps(self.args),
@@ -79,7 +79,7 @@ class Resource(object):
         meta_file = os.path.join(self.base_dir, 'meta.yaml')
 
         with open(meta_file, 'w') as f:
-            f.write(yaml.dump(self.metadata))
+            f.write(yaml.dump(self.metadata, default_flow_style=False))
 
 
 def create(name, base_path, dest_path, args, connections={}):

From 26e605112bc437eb234bef795b4e7131992aa98f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?=
Date: Tue, 28 Apr 2015 09:52:44 +0000
Subject: [PATCH 63/87] Allow connections from any host

---
 x/resources/mariadb_user/actions/run.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/x/resources/mariadb_user/actions/run.yml b/x/resources/mariadb_user/actions/run.yml
index 7f50378a..98beea11 100644
--- a/x/resources/mariadb_user/actions/run.yml
+++ b/x/resources/mariadb_user/actions/run.yml
@@ -6,6 +6,7 @@
       name: {{name}}
       password: {{password}}
       priv: {{db}}.*:ALL
+      host: '%'
       state: present
       login_user: root
       login_password: {{login_password}}

From f64957b72aa65c5b2fa443b576be7ca85b18baf4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?=
Date: Tue, 28 Apr 2015 09:55:20 +0000
Subject: [PATCH 64/87] Rename keystone resource to keystone_service

---
 .gitignore | 3 +++
 x/resources/{keystone => keystone_service}/actions/remove.yml | 0
 x/resources/{keystone => keystone_service}/actions/run.yml | 2 +-
 x/resources/{keystone => keystone_service}/meta.yaml | 2 +-
 4 files changed, 5 insertions(+), 2 deletions(-)
 rename x/resources/{keystone => keystone_service}/actions/remove.yml (100%)
 rename x/resources/{keystone => keystone_service}/actions/run.yml (93%)
 rename x/resources/{keystone => keystone_service}/meta.yaml (100%)

diff --git a/.gitignore b/.gitignore
index 83a9627c..043c504a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,3 +7,6 @@
 .vagrant
 
 tmp/
+
+#vim
+*.swp
diff --git a/x/resources/keystone/actions/remove.yml b/x/resources/keystone_service/actions/remove.yml
similarity index 100%
rename from x/resources/keystone/actions/remove.yml
rename to x/resources/keystone_service/actions/remove.yml
diff --git a/x/resources/keystone/actions/run.yml b/x/resources/keystone_service/actions/run.yml
similarity index 93%
rename from x/resources/keystone/actions/run.yml
rename to x/resources/keystone_service/actions/run.yml
index 7146e52d..7eb81109 100644
--- a/x/resources/keystone/actions/run.yml
+++ b/x/resources/keystone_service/actions/run.yml
@@ -9,5 +9,5 @@
     ports:
       - {{ port }}:5000
       - {{ admin_port }}:35357
-    volumnes:
+    volumes:
       - {{ config_dir }}:/etc/keystone
diff --git a/x/resources/keystone/meta.yaml b/x/resources/keystone_service/meta.yaml
similarity index 100%
rename from x/resources/keystone/meta.yaml
rename to x/resources/keystone_service/meta.yaml
index fbc98de0..1e3add8d 100644
--- a/x/resources/keystone/meta.yaml
+++ b/x/resources/keystone_service/meta.yaml
@@ -4,8 +4,8 @@ version: 1.0.0
 input:
   image: kollaglue/centos-rdo-keystone
   config_dir:
-  admin_port:
   port:
+  admin_port:
   ip:
   ssh_key:
   ssh_user:

From 4a5d239760898ee1e40fcfb04f5241ee5d92938b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?=
Date: Tue, 28 Apr 2015 09:58:58 +0000
Subject: [PATCH 65/87] Keystone config resource

---
 .../keystone_config/actions/remove.yml | 4 +
 x/resources/keystone_config/actions/run.yml | 14 +
 x/resources/keystone_config/meta.yaml | 13 +
 .../templates/default_catalog.templates | 27 +
 .../keystone_config/templates/keystone.conf | 1589 +++++++++++++++++
 .../keystone_config/templates/logging.conf | 65 +
 .../keystone_config/templates/policy.json | 171 ++
 7 files changed, 1883 insertions(+)
 create mode 100644 x/resources/keystone_config/actions/remove.yml
 create mode 100644 x/resources/keystone_config/actions/run.yml
 create mode 100644 x/resources/keystone_config/meta.yaml
 create mode 100644 x/resources/keystone_config/templates/default_catalog.templates
 create mode 100644 x/resources/keystone_config/templates/keystone.conf
 create mode 100644 x/resources/keystone_config/templates/logging.conf
 create mode 100644 x/resources/keystone_config/templates/policy.json

diff --git a/x/resources/keystone_config/actions/remove.yml b/x/resources/keystone_config/actions/remove.yml
new file mode 100644
index 00000000..7e452a44
--- /dev/null
+++ b/x/resources/keystone_config/actions/remove.yml
@@ -0,0 +1,4 @@
+- hosts: [{{ ip }}]
+  sudo: yes
+  tasks:
+    - file: path={{config_dir}} state=absent
diff --git a/x/resources/keystone_config/actions/run.yml b/x/resources/keystone_config/actions/run.yml
new file mode 100644
index 00000000..e24d0fae
--- /dev/null
+++ b/x/resources/keystone_config/actions/run.yml
@@ -0,0 +1,14 @@
+- hosts: [{{ ip }}]
+  sudo: yes
+  vars:
+    admin_token: {{admin_token}}
+    db_user: {{db_user}}
+    db_password: {{db_password}}
+    db_host: {{db_host}}
+    db_name: {{db_name}}
+  tasks:
+    - file: path={{config_dir}} state=directory
+    - template: src={{resource_dir}}/templates/keystone.conf dest={{config_dir}}/keystone.conf
+    - template: src={{resource_dir}}/templates/default_catalog.templates dest={{config_dir}}/default_catalog.templates
+    - template: src={{resource_dir}}/templates/logging.conf dest={{config_dir}}/logging.conf
+    - template: src={{resource_dir}}/templates/policy.json dest={{config_dir}}/policy.json
diff --git a/x/resources/keystone_config/meta.yaml b/x/resources/keystone_config/meta.yaml
new file mode 100644
index 00000000..b4ea7ce6
--- /dev/null
+++ b/x/resources/keystone_config/meta.yaml
@@ -0,0 +1,13 @@
+id: keystone_config
+handler: ansible
+version: 1.0.0
+input:
+  config_dir:
+  admin_token:
+  db_user:
+  db_password:
+  db_host:
+  db_name:
+  ip:
+  ssh_key:
+  ssh_user:
diff --git a/x/resources/keystone_config/templates/default_catalog.templates b/x/resources/keystone_config/templates/default_catalog.templates
new file mode 100644
index 00000000..a69b7f06
--- /dev/null
+++
b/x/resources/keystone_config/templates/default_catalog.templates @@ -0,0 +1,27 @@ +# config for templated.Catalog, using camelCase because I don't want to do +# translations for keystone compat +catalog.RegionOne.identity.publicURL = http://localhost:$(public_port)s/v2.0 +catalog.RegionOne.identity.adminURL = http://localhost:$(admin_port)s/v2.0 +catalog.RegionOne.identity.internalURL = http://localhost:$(public_port)s/v2.0 +catalog.RegionOne.identity.name = Identity Service + +# fake compute service for now to help novaclient tests work +catalog.RegionOne.compute.publicURL = http://localhost:8774/v1.1/$(tenant_id)s +catalog.RegionOne.compute.adminURL = http://localhost:8774/v1.1/$(tenant_id)s +catalog.RegionOne.compute.internalURL = http://localhost:8774/v1.1/$(tenant_id)s +catalog.RegionOne.compute.name = Compute Service + +catalog.RegionOne.volume.publicURL = http://localhost:8776/v1/$(tenant_id)s +catalog.RegionOne.volume.adminURL = http://localhost:8776/v1/$(tenant_id)s +catalog.RegionOne.volume.internalURL = http://localhost:8776/v1/$(tenant_id)s +catalog.RegionOne.volume.name = Volume Service + +catalog.RegionOne.ec2.publicURL = http://localhost:8773/services/Cloud +catalog.RegionOne.ec2.adminURL = http://localhost:8773/services/Admin +catalog.RegionOne.ec2.internalURL = http://localhost:8773/services/Cloud +catalog.RegionOne.ec2.name = EC2 Service + +catalog.RegionOne.image.publicURL = http://localhost:9292/v1 +catalog.RegionOne.image.adminURL = http://localhost:9292/v1 +catalog.RegionOne.image.internalURL = http://localhost:9292/v1 +catalog.RegionOne.image.name = Image Service diff --git a/x/resources/keystone_config/templates/keystone.conf b/x/resources/keystone_config/templates/keystone.conf new file mode 100644 index 00000000..e8bfb466 --- /dev/null +++ b/x/resources/keystone_config/templates/keystone.conf @@ -0,0 +1,1589 @@ +[DEFAULT] + +# +# Options defined in keystone +# + +# A "shared secret" that can be used to bootstrap Keystone. +# This "token" does not represent a user, and carries no +# explicit authorization. To disable in production (highly +# recommended), remove AdminTokenAuthMiddleware from your +# paste application pipelines (for example, in keystone- +# paste.ini). (string value) +admin_token={{admin_token}} + +# The IP address of the network interface for the public +# service to listen on. (string value) +# Deprecated group/name - [DEFAULT]/bind_host +#public_bind_host=0.0.0.0 + +# The IP address of the network interface for the admin +# service to listen on. (string value) +# Deprecated group/name - [DEFAULT]/bind_host +#admin_bind_host=0.0.0.0 + +# (Deprecated) The port which the OpenStack Compute service +# listens on. This option was only used for string replacement +# in the templated catalog backend. Templated catalogs should +# replace the "$(compute_port)s" substitution with the static +# port of the compute service. As of Juno, this option is +# deprecated and will be removed in the L release. (integer +# value) +#compute_port=8774 + +# The port number which the admin service listens on. (integer +# value) +admin_port=35357 + +# The port number which the public service listens on. +# (integer value) +public_port=5000 + +# The base public endpoint URL for Keystone that is advertised +# to clients (NOTE: this does NOT affect how Keystone listens +# for connections). Defaults to the base host URL of the +# request. E.g. a request to http://server:5000/v2.0/users +# will default to http://server:5000. 
You should only need to +# set this value if the base URL contains a path (e.g. +# /prefix/v2.0) or the endpoint should be found on a different +# server. (string value) +#public_endpoint= + +# The base admin endpoint URL for Keystone that is advertised +# to clients (NOTE: this does NOT affect how Keystone listens +# for connections). Defaults to the base host URL of the +# request. E.g. a request to http://server:35357/v2.0/users +# will default to http://server:35357. You should only need to +# set this value if the base URL contains a path (e.g. +# /prefix/v2.0) or the endpoint should be found on a different +# server. (string value) +#admin_endpoint= + +# The number of worker processes to serve the public WSGI +# application. Defaults to number of CPUs (minimum of 2). +# (integer value) +#public_workers= + +# The number of worker processes to serve the admin WSGI +# application. Defaults to number of CPUs (minimum of 2). +# (integer value) +#admin_workers= + +# Enforced by optional sizelimit middleware +# (keystone.middleware:RequestBodySizeLimiter). (integer +# value) +#max_request_body_size=114688 + +# Limit the sizes of user & project ID/names. (integer value) +#max_param_size=64 + +# Similar to max_param_size, but provides an exception for +# token values. (integer value) +#max_token_size=8192 + +# During a SQL upgrade member_role_id will be used to create a +# new role that will replace records in the assignment table +# with explicit role grants. After migration, the +# member_role_id will be used in the API add_user_to_project. +# (string value) +#member_role_id=9fe2ff9ee4384b1894a90878d3e92bab + +# During a SQL upgrade member_role_name will be used to create +# a new role that will replace records in the assignment table +# with explicit role grants. After migration, member_role_name +# will be ignored. (string value) +#member_role_name=_member_ + +# The value passed as the keyword "rounds" to passlib's +# encrypt method. (integer value) +#crypt_strength=40000 + +# Set this to true if you want to enable TCP_KEEPALIVE on +# server sockets, i.e. sockets used by the Keystone wsgi +# server for client connections. (boolean value) +#tcp_keepalive=false + +# Sets the value of TCP_KEEPIDLE in seconds for each server +# socket. Only applies if tcp_keepalive is true. Not supported +# on OS X. (integer value) +#tcp_keepidle=600 + +# The maximum number of entities that will be returned in a +# collection, with no limit set by default. This global limit +# may be then overridden for a specific driver, by specifying +# a list_limit in the appropriate section (e.g. [assignment]). +# (integer value) +#list_limit= + +# Set this to false if you want to enable the ability for +# user, group and project entities to be moved between domains +# by updating their domain_id. Allowing such movement is not +# recommended if the scope of a domain admin is being +# restricted by use of an appropriate policy file (see +# policy.v3cloudsample as an example). (boolean value) +#domain_id_immutable=true + +# If set to true, strict password length checking is performed +# for password manipulation. If a password exceeds the maximum +# length, the operation will fail with an HTTP 403 Forbidden +# error. If set to false, passwords are automatically +# truncated to the maximum length. (boolean value) +#strict_password_check=false + + +# +# Options defined in oslo.messaging +# + +# Use durable queues in amqp. 
(boolean value) +# Deprecated group/name - [DEFAULT]/rabbit_durable_queues +#amqp_durable_queues=false + +# Auto-delete queues in amqp. (boolean value) +#amqp_auto_delete=false + +# Size of RPC connection pool. (integer value) +#rpc_conn_pool_size=30 + +# Qpid broker hostname. (string value) +#qpid_hostname=localhost + +# Qpid broker port. (integer value) +#qpid_port=5672 + +# Qpid HA cluster host:port pairs. (list value) +#qpid_hosts=$qpid_hostname:$qpid_port + +# Username for Qpid connection. (string value) +#qpid_username= + +# Password for Qpid connection. (string value) +#qpid_password= + +# Space separated list of SASL mechanisms to use for auth. +# (string value) +#qpid_sasl_mechanisms= + +# Seconds between connection keepalive heartbeats. (integer +# value) +#qpid_heartbeat=60 + +# Transport to use, either 'tcp' or 'ssl'. (string value) +#qpid_protocol=tcp + +# Whether to disable the Nagle algorithm. (boolean value) +#qpid_tcp_nodelay=true + +# The number of prefetched messages held by receiver. (integer +# value) +#qpid_receiver_capacity=1 + +# The qpid topology version to use. Version 1 is what was +# originally used by impl_qpid. Version 2 includes some +# backwards-incompatible changes that allow broker federation +# to work. Users should update to version 2 when they are +# able to take everything down, as it requires a clean break. +# (integer value) +#qpid_topology_version=1 + +# SSL version to use (valid only if SSL enabled). valid values +# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some +# distributions. (string value) +#kombu_ssl_version= + +# SSL key file (valid only if SSL enabled). (string value) +#kombu_ssl_keyfile= + +# SSL cert file (valid only if SSL enabled). (string value) +#kombu_ssl_certfile= + +# SSL certification authority file (valid only if SSL +# enabled). (string value) +#kombu_ssl_ca_certs= + +# How long to wait before reconnecting in response to an AMQP +# consumer cancel notification. (floating point value) +#kombu_reconnect_delay=1.0 + +# The RabbitMQ broker address where a single node is used. +# (string value) +#rabbit_host=localhost + +# The RabbitMQ broker port where a single node is used. +# (integer value) +#rabbit_port=5672 + +# RabbitMQ HA cluster host:port pairs. (list value) +#rabbit_hosts=$rabbit_host:$rabbit_port + +# Connect over SSL for RabbitMQ. (boolean value) +#rabbit_use_ssl=false + +# The RabbitMQ userid. (string value) +#rabbit_userid=guest + +# The RabbitMQ password. (string value) +#rabbit_password=guest + +# the RabbitMQ login method (string value) +#rabbit_login_method=AMQPLAIN + +# The RabbitMQ virtual host. (string value) +#rabbit_virtual_host=/ + +# How frequently to retry connecting with RabbitMQ. (integer +# value) +#rabbit_retry_interval=1 + +# How long to backoff for between retries when connecting to +# RabbitMQ. (integer value) +#rabbit_retry_backoff=2 + +# Maximum number of RabbitMQ connection retries. Default is 0 +# (infinite retry count). (integer value) +#rabbit_max_retries=0 + +# Use HA queues in RabbitMQ (x-ha-policy: all). If you change +# this option, you must wipe the RabbitMQ database. (boolean +# value) +#rabbit_ha_queues=false + +# If passed, use a fake RabbitMQ provider. (boolean value) +#fake_rabbit=false + +# ZeroMQ bind address. Should be a wildcard (*), an ethernet +# interface, or IP. The "host" option should point or resolve +# to this address. (string value) +#rpc_zmq_bind_address=* + +# MatchMaker driver. 
(string value) +#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost + +# ZeroMQ receiver listening port. (integer value) +#rpc_zmq_port=9501 + +# Number of ZeroMQ contexts, defaults to 1. (integer value) +#rpc_zmq_contexts=1 + +# Maximum number of ingress messages to locally buffer per +# topic. Default is unlimited. (integer value) +#rpc_zmq_topic_backlog= + +# Directory for holding IPC sockets. (string value) +#rpc_zmq_ipc_dir=/var/run/openstack + +# Name of this node. Must be a valid hostname, FQDN, or IP +# address. Must match "host" option, if running Nova. (string +# value) +#rpc_zmq_host=keystone + +# Seconds to wait before a cast expires (TTL). Only supported +# by impl_zmq. (integer value) +#rpc_cast_timeout=30 + +# Heartbeat frequency. (integer value) +#matchmaker_heartbeat_freq=300 + +# Heartbeat time-to-live. (integer value) +#matchmaker_heartbeat_ttl=600 + +# Size of RPC greenthread pool. (integer value) +#rpc_thread_pool_size=64 + +# Driver or drivers to handle sending notifications. (multi +# valued) +#notification_driver= + +# AMQP topic used for OpenStack notifications. (list value) +# Deprecated group/name - [rpc_notifier2]/topics +#notification_topics=notifications + +# Seconds to wait for a response from a call. (integer value) +#rpc_response_timeout=60 + +# A URL representing the messaging driver to use and its full +# configuration. If not set, we fall back to the rpc_backend +# option and driver specific configuration. (string value) +#transport_url= + +# The messaging driver to use, defaults to rabbit. Other +# drivers include qpid and zmq. (string value) +#rpc_backend=rabbit + +# The default exchange under which topics are scoped. May be +# overridden by an exchange name specified in the +# transport_url option. (string value) +#control_exchange=keystone + + +# +# Options defined in keystone.notifications +# + +# Default publisher_id for outgoing notifications (string +# value) +#default_publisher_id= + + +# +# Options defined in keystone.openstack.common.eventlet_backdoor +# + +# Enable eventlet backdoor. Acceptable values are 0, , +# and :, where 0 results in listening on a random +# tcp port number; results in listening on the +# specified port number (and not enabling backdoor if that +# port is in use); and : results in listening on +# the smallest unused port number within the specified range +# of port numbers. The chosen port is displayed in the +# service's log file. (string value) +#backdoor_port= + + +# +# Options defined in keystone.openstack.common.log +# + +# Print debugging output (set logging level to DEBUG instead +# of default WARNING level). (boolean value) +#debug=false + +# Print more verbose output (set logging level to INFO instead +# of default WARNING level). (boolean value) +#verbose=false + +# Log output to standard error. (boolean value) +#use_stderr=false + +# Format string to use for log messages with context. (string +# value) +#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s + +# Format string to use for log messages without context. +# (string value) +#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s + +# Data to append to log format when level is DEBUG. (string +# value) +#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d + +# Prefix each line of exception output with this format. 
+# (string value) +#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s + +# List of logger=LEVEL pairs. (list value) +#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN + +# Enables or disables publication of error events. (boolean +# value) +#publish_errors=false + +# Enables or disables fatal status of deprecations. (boolean +# value) +#fatal_deprecations=false + +# The format for an instance that is passed with the log +# message. (string value) +#instance_format="[instance: %(uuid)s] " + +# The format for an instance UUID that is passed with the log +# message. (string value) +#instance_uuid_format="[instance: %(uuid)s] " + +# The name of a logging configuration file. This file is +# appended to any existing logging configuration files. For +# details about logging configuration files, see the Python +# logging module documentation. (string value) +# Deprecated group/name - [DEFAULT]/log_config +#log_config_append= + +# DEPRECATED. A logging.Formatter log message format string +# which may use any of the available logging.LogRecord +# attributes. This option is deprecated. Please use +# logging_context_format_string and +# logging_default_format_string instead. (string value) +#log_format= + +# Format string for %%(asctime)s in log records. Default: +# %(default)s . (string value) +#log_date_format=%Y-%m-%d %H:%M:%S + +# (Optional) Name of log file to output to. (string value) +# If not set here, logging will go to /var/log/keystone/keystone.log, +# default from keystone-dist.conf. +# Deprecated group/name - [DEFAULT]/logfile +#log_file=/var/log/keystone/keystone.log + +# (Optional) The base directory used for relative --log-file +# paths. (string value) +# Deprecated group/name - [DEFAULT]/logdir +#log_dir= + +# Use syslog for logging. Existing syslog format is DEPRECATED +# during I, and will change in J to honor RFC5424. (boolean +# value) +#use_syslog=false + +# (Optional) Enables or disables syslog rfc5424 format for +# logging. If enabled, prefixes the MSG part of the syslog +# message with APP-NAME (RFC5424). The format without the APP- +# NAME is deprecated in I, and will be removed in J. (boolean +# value) +#use_syslog_rfc_format=false + +# Syslog facility to receive log lines. (string value) +#syslog_log_facility=LOG_USER + + +# +# Options defined in keystone.openstack.common.policy +# + +# The JSON file that defines policies. (string value) +#policy_file=policy.json + +# Default rule. Enforced when a requested rule is not found. +# (string value) +#policy_default_rule=default + + +[assignment] + +# +# Options defined in keystone +# + +# Assignment backend driver. (string value) +#driver= + +# Toggle for assignment caching. This has no effect unless +# global caching is enabled. (boolean value) +#caching=true + +# TTL (in seconds) to cache assignment data. This has no +# effect unless global caching is enabled. (integer value) +#cache_time= + +# Maximum number of entities that will be returned in an +# assignment collection. (integer value) +#list_limit= + + +[auth] + +# +# Options defined in keystone +# + +# Default auth methods. (list value) +#methods=external,password,token + +# The password auth plugin module. (string value) +#password=keystone.auth.plugins.password.Password + +# The token auth plugin module. 
(string value) +#token=keystone.auth.plugins.token.Token + +# The external (REMOTE_USER) auth plugin module. (string +# value) +#external=keystone.auth.plugins.external.DefaultDomain + + +[cache] + +# +# Options defined in keystone +# + +# Prefix for building the configuration dictionary for the +# cache region. This should not need to be changed unless +# there is another dogpile.cache region with the same +# configuration name. (string value) +#config_prefix=cache.keystone + +# Default TTL, in seconds, for any cached item in the +# dogpile.cache region. This applies to any cached method that +# doesn't have an explicit cache expiration time defined for +# it. (integer value) +#expiration_time=600 + +# Dogpile.cache backend module. It is recommended that +# Memcache with pooling (keystone.cache.memcache_pool) or +# Redis (dogpile.cache.redis) be used in production +# deployments. Small workloads (single process) like devstack +# can use the dogpile.cache.memory backend. (string value) +#backend=keystone.common.cache.noop + +# Arguments supplied to the backend module. Specify this +# option once per argument to be passed to the dogpile.cache +# backend. Example format: ":". (multi valued) +#backend_argument= + +# Proxy classes to import that will affect the way the +# dogpile.cache backend functions. See the dogpile.cache +# documentation on changing-backend-behavior. (list value) +#proxies= + +# Global toggle for all caching using the should_cache_fn +# mechanism. (boolean value) +#enabled=false + +# Extra debugging from the cache backend (cache keys, +# get/set/delete/etc calls). This is only really useful if you +# need to see the specific cache-backend get/set/delete calls +# with the keys/values. Typically this should be left set to +# false. (boolean value) +#debug_cache_backend=false + +# Memcache servers in the format of "host:port". +# (dogpile.cache.memcache and keystone.cache.memcache_pool +# backends only) (list value) +#memcache_servers=localhost:11211 + +# Number of seconds memcached server is considered dead before +# it is tried again. (dogpile.cache.memcache and +# keystone.cache.memcache_pool backends only) (integer value) +#memcache_dead_retry=300 + +# Timeout in seconds for every call to a server. +# (dogpile.cache.memcache and keystone.cache.memcache_pool +# backends only) (integer value) +#memcache_socket_timeout=3 + +# Max total number of open connections to every memcached +# server. (keystone.cache.memcache_pool backend only) (integer +# value) +#memcache_pool_maxsize=10 + +# Number of seconds a connection to memcached is held unused +# in the pool before it is closed. +# (keystone.cache.memcache_pool backend only) (integer value) +#memcache_pool_unused_timeout=60 + +# Number of seconds that an operation will wait to get a +# memcache client connection. (integer value) +#memcache_pool_connection_get_timeout=10 + + +[catalog] + +# +# Options defined in keystone +# + +# Catalog template file name for use with the template catalog +# backend. (string value) +#template_file=/etc/keystone/default_catalog.templates + +# Catalog backend driver. (string value) +#driver=keystone.catalog.backends.sql.Catalog + +# Toggle for catalog caching. This has no effect unless global +# caching is enabled. (boolean value) +#caching=true + +# Time to cache catalog data (in seconds). This has no effect +# unless global and catalog caching are enabled. (integer +# value) +#cache_time= + +# Maximum number of entities that will be returned in a +# catalog collection. 
(integer value) +#list_limit= + +# (Deprecated) List of possible substitutions for use in +# formatting endpoints. Use caution when modifying this list. +# It will give users with permission to create endpoints the +# ability to see those values in your configuration file. This +# option will be removed in Juno. (list value) +#endpoint_substitution_whitelist=tenant_id,user_id,public_bind_host,admin_bind_host,compute_host,compute_port,admin_port,public_port,public_endpoint,admin_endpoint + + +[credential] + +# +# Options defined in keystone +# + +# Credential backend driver. (string value) +#driver=keystone.credential.backends.sql.Credential + + +[database] + +# +# Options defined in oslo.db +# + +# The file name to use with SQLite. (string value) +#sqlite_db=oslo.sqlite + +# If True, SQLite uses synchronous mode. (boolean value) +#sqlite_synchronous=true + +# The back end to use for the database. (string value) +# Deprecated group/name - [DEFAULT]/db_backend +#backend=sqlalchemy + +# The SQLAlchemy connection string to use to connect to the +# database. (string value) +# Deprecated group/name - [DEFAULT]/sql_connection +# Deprecated group/name - [DATABASE]/sql_connection +# Deprecated group/name - [sql]/connection +#connection=mysql://keystone:keystone@localhost/keystone +connection=mysql://{{db_user}}:{{db_password}}@{{db_host}}/{{db_name}} + +# The SQLAlchemy connection string to use to connect to the +# slave database. (string value) +#slave_connection= + +# The SQL mode to be used for MySQL sessions. This option, +# including the default, overrides any server-set SQL mode. To +# use whatever SQL mode is set by the server configuration, +# set this to no value. Example: mysql_sql_mode= (string +# value) +#mysql_sql_mode=TRADITIONAL + +# Timeout before idle SQL connections are reaped. (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_idle_timeout +# Deprecated group/name - [DATABASE]/sql_idle_timeout +# Deprecated group/name - [sql]/idle_timeout +#idle_timeout=3600 + +# Minimum number of SQL connections to keep open in a pool. +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_min_pool_size +# Deprecated group/name - [DATABASE]/sql_min_pool_size +#min_pool_size=1 + +# Maximum number of SQL connections to keep open in a pool. +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_pool_size +# Deprecated group/name - [DATABASE]/sql_max_pool_size +#max_pool_size= + +# Maximum db connection retries during startup. Set to -1 to +# specify an infinite retry count. (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_retries +# Deprecated group/name - [DATABASE]/sql_max_retries +#max_retries=10 + +# Interval between retries of opening a SQL connection. +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_retry_interval +# Deprecated group/name - [DATABASE]/reconnect_interval +#retry_interval=10 + +# If set, use this value for max_overflow with SQLAlchemy. +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_overflow +# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow +#max_overflow= + +# Verbosity of SQL debugging information: 0=None, +# 100=Everything. (integer value) +# Deprecated group/name - [DEFAULT]/sql_connection_debug +#connection_debug=0 + +# Add Python stack traces to SQL as comment strings. (boolean +# value) +# Deprecated group/name - [DEFAULT]/sql_connection_trace +#connection_trace=false + +# If set, use this value for pool_timeout with SQLAlchemy. 
+# (integer value) +# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout +#pool_timeout= + +# Enable the experimental use of database reconnect on +# connection lost. (boolean value) +#use_db_reconnect=false + +# Seconds between database connection retries. (integer value) +#db_retry_interval=1 + +# If True, increases the interval between database connection +# retries up to db_max_retry_interval. (boolean value) +#db_inc_retry_interval=true + +# If db_inc_retry_interval is set, the maximum seconds between +# database connection retries. (integer value) +#db_max_retry_interval=10 + +# Maximum database connection retries before error is raised. +# Set to -1 to specify an infinite retry count. (integer +# value) +#db_max_retries=20 + + +[ec2] + +# +# Options defined in keystone +# + +# EC2Credential backend driver. (string value) +#driver=keystone.contrib.ec2.backends.sql.Ec2 + + +[endpoint_filter] + +# +# Options defined in keystone +# + +# Endpoint Filter backend driver (string value) +#driver=keystone.contrib.endpoint_filter.backends.sql.EndpointFilter + +# Toggle to return all active endpoints if no filter exists. +# (boolean value) +#return_all_endpoints_if_no_filter=true + + +[endpoint_policy] + +# +# Options defined in keystone +# + +# Endpoint policy backend driver (string value) +#driver=keystone.contrib.endpoint_policy.backends.sql.EndpointPolicy + + +[federation] + +# +# Options defined in keystone +# + +# Federation backend driver. (string value) +#driver=keystone.contrib.federation.backends.sql.Federation + +# Value to be used when filtering assertion parameters from +# the environment. (string value) +#assertion_prefix= + + +[identity] + +# +# Options defined in keystone +# + +# This references the domain to use for all Identity API v2 +# requests (which are not aware of domains). A domain with +# this ID will be created for you by keystone-manage db_sync +# in migration 008. The domain referenced by this ID cannot be +# deleted on the v3 API, to prevent accidentally breaking the +# v2 API. There is nothing special about this domain, other +# than the fact that it must exist to order to maintain +# support for your v2 clients. (string value) +#default_domain_id=default + +# A subset (or all) of domains can have their own identity +# driver, each with their own partial configuration file in a +# domain configuration directory. Only values specific to the +# domain need to be placed in the domain specific +# configuration file. This feature is disabled by default; set +# to true to enable. (boolean value) +#domain_specific_drivers_enabled=false + +# Path for Keystone to locate the domain specific identity +# configuration files if domain_specific_drivers_enabled is +# set to true. (string value) +#domain_config_dir=/etc/keystone/domains + +# Identity backend driver. (string value) +#driver=keystone.identity.backends.sql.Identity + +# Maximum supported length for user passwords; decrease to +# improve performance. (integer value) +#max_password_length=4096 + +# Maximum number of entities that will be returned in an +# identity collection. (integer value) +#list_limit= + + +[identity_mapping] + +# +# Options defined in keystone +# + +# Keystone Identity Mapping backend driver. (string value) +#driver=keystone.identity.mapping_backends.sql.Mapping + +# Public ID generator for user and group entities. The +# Keystone identity mapper only supports generators that +# produce no more than 64 characters. 
(string value) +#generator=keystone.identity.id_generators.sha256.Generator + +# The format of user and group IDs changed in Juno for +# backends that do not generate UUIDs (e.g. LDAP), with +# keystone providing a hash mapping to the underlying +# attribute in LDAP. By default this mapping is disabled, +# which ensures that existing IDs will not change. Even when +# the mapping is enabled by using domain specific drivers, any +# users and groups from the default domain being handled by +# LDAP will still not be mapped to ensure their IDs remain +# backward compatible. Setting this value to False will enable +# the mapping for even the default LDAP driver. It is only +# safe to do this if you do not already have assignments for +# users and groups from the default LDAP domain, and it is +# acceptable for Keystone to provide the different IDs to +# clients than it did previously. Typically this means that +# the only time you can set this value to False is when +# configuring a fresh installation. (boolean value) +#backward_compatible_ids=true + + +[kvs] + +# +# Options defined in keystone +# + +# Extra dogpile.cache backend modules to register with the +# dogpile.cache library. (list value) +#backends= + +# Prefix for building the configuration dictionary for the KVS +# region. This should not need to be changed unless there is +# another dogpile.cache region with the same configuration +# name. (string value) +#config_prefix=keystone.kvs + +# Toggle to disable using a key-mangling function to ensure +# fixed length keys. This is toggle-able for debugging +# purposes, it is highly recommended to always leave this set +# to true. (boolean value) +#enable_key_mangler=true + +# Default lock timeout for distributed locking. (integer +# value) +#default_lock_timeout=5 + + +[ldap] + +# +# Options defined in keystone +# + +# URL for connecting to the LDAP server. (string value) +#url=ldap://localhost + +# User BindDN to query the LDAP server. (string value) +#user= + +# Password for the BindDN to query the LDAP server. (string +# value) +#password= + +# LDAP server suffix (string value) +#suffix=cn=example,cn=com + +# If true, will add a dummy member to groups. This is required +# if the objectclass for groups requires the "member" +# attribute. (boolean value) +#use_dumb_member=false + +# DN of the "dummy member" to use when "use_dumb_member" is +# enabled. (string value) +#dumb_member=cn=dumb,dc=nonexistent + +# Delete subtrees using the subtree delete control. Only +# enable this option if your LDAP server supports subtree +# deletion. (boolean value) +#allow_subtree_delete=false + +# The LDAP scope for queries, this can be either "one" +# (onelevel/singleLevel) or "sub" (subtree/wholeSubtree). +# (string value) +#query_scope=one + +# Maximum results per page; a value of zero ("0") disables +# paging. (integer value) +#page_size=0 + +# The LDAP dereferencing option for queries. This can be +# either "never", "searching", "always", "finding" or +# "default". The "default" option falls back to using default +# dereferencing configured by your ldap.conf. (string value) +#alias_dereferencing=default + +# Sets the LDAP debugging level for LDAP calls. A value of 0 +# means that debugging is not enabled. This value is a +# bitmask, consult your LDAP documentation for possible +# values. (integer value) +#debug_level= + +# Override the system's default referral chasing behavior for +# queries. (boolean value) +#chase_referrals= + +# Search base for users. 
(string value) +#user_tree_dn= + +# LDAP search filter for users. (string value) +#user_filter= + +# LDAP objectclass for users. (string value) +#user_objectclass=inetOrgPerson + +# LDAP attribute mapped to user id. WARNING: must not be a +# multivalued attribute. (string value) +#user_id_attribute=cn + +# LDAP attribute mapped to user name. (string value) +#user_name_attribute=sn + +# LDAP attribute mapped to user email. (string value) +#user_mail_attribute=mail + +# LDAP attribute mapped to password. (string value) +#user_pass_attribute=userPassword + +# LDAP attribute mapped to user enabled flag. (string value) +#user_enabled_attribute=enabled + +# Invert the meaning of the boolean enabled values. Some LDAP +# servers use a boolean lock attribute where "true" means an +# account is disabled. Setting "user_enabled_invert = true" +# will allow these lock attributes to be used. This setting +# will have no effect if "user_enabled_mask" or +# "user_enabled_emulation" settings are in use. (boolean +# value) +#user_enabled_invert=false + +# Bitmask integer to indicate the bit that the enabled value +# is stored in if the LDAP server represents "enabled" as a +# bit on an integer rather than a boolean. A value of "0" +# indicates the mask is not used. If this is not set to "0" +# the typical value is "2". This is typically used when +# "user_enabled_attribute = userAccountControl". (integer +# value) +#user_enabled_mask=0 + +# Default value to enable users. This should match an +# appropriate int value if the LDAP server uses non-boolean +# (bitmask) values to indicate if a user is enabled or +# disabled. If this is not set to "True" the typical value is +# "512". This is typically used when "user_enabled_attribute = +# userAccountControl". (string value) +#user_enabled_default=True + +# List of attributes stripped off the user on update. (list +# value) +#user_attribute_ignore=default_project_id,tenants + +# LDAP attribute mapped to default_project_id for users. +# (string value) +#user_default_project_id_attribute= + +# Allow user creation in LDAP backend. (boolean value) +#user_allow_create=true + +# Allow user updates in LDAP backend. (boolean value) +#user_allow_update=true + +# Allow user deletion in LDAP backend. (boolean value) +#user_allow_delete=true + +# If true, Keystone uses an alternative method to determine if +# a user is enabled or not by checking if they are a member of +# the "user_enabled_emulation_dn" group. (boolean value) +#user_enabled_emulation=false + +# DN of the group entry to hold enabled users when using +# enabled emulation. (string value) +#user_enabled_emulation_dn= + +# List of additional LDAP attributes used for mapping +# additional attribute mappings for users. Attribute mapping +# format is :, where ldap_attr is the +# attribute in the LDAP entry and user_attr is the Identity +# API attribute. (list value) +#user_additional_attribute_mapping= + +# Search base for projects (string value) +# Deprecated group/name - [ldap]/tenant_tree_dn +#project_tree_dn= + +# LDAP search filter for projects. (string value) +# Deprecated group/name - [ldap]/tenant_filter +#project_filter= + +# LDAP objectclass for projects. (string value) +# Deprecated group/name - [ldap]/tenant_objectclass +#project_objectclass=groupOfNames + +# LDAP attribute mapped to project id. (string value) +# Deprecated group/name - [ldap]/tenant_id_attribute +#project_id_attribute=cn + +# LDAP attribute mapped to project membership for user. 
+# (string value) +# Deprecated group/name - [ldap]/tenant_member_attribute +#project_member_attribute=member + +# LDAP attribute mapped to project name. (string value) +# Deprecated group/name - [ldap]/tenant_name_attribute +#project_name_attribute=ou + +# LDAP attribute mapped to project description. (string value) +# Deprecated group/name - [ldap]/tenant_desc_attribute +#project_desc_attribute=description + +# LDAP attribute mapped to project enabled. (string value) +# Deprecated group/name - [ldap]/tenant_enabled_attribute +#project_enabled_attribute=enabled + +# LDAP attribute mapped to project domain_id. (string value) +# Deprecated group/name - [ldap]/tenant_domain_id_attribute +#project_domain_id_attribute=businessCategory + +# List of attributes stripped off the project on update. (list +# value) +# Deprecated group/name - [ldap]/tenant_attribute_ignore +#project_attribute_ignore= + +# Allow project creation in LDAP backend. (boolean value) +# Deprecated group/name - [ldap]/tenant_allow_create +#project_allow_create=true + +# Allow project update in LDAP backend. (boolean value) +# Deprecated group/name - [ldap]/tenant_allow_update +#project_allow_update=true + +# Allow project deletion in LDAP backend. (boolean value) +# Deprecated group/name - [ldap]/tenant_allow_delete +#project_allow_delete=true + +# If true, Keystone uses an alternative method to determine if +# a project is enabled or not by checking if they are a member +# of the "project_enabled_emulation_dn" group. (boolean value) +# Deprecated group/name - [ldap]/tenant_enabled_emulation +#project_enabled_emulation=false + +# DN of the group entry to hold enabled projects when using +# enabled emulation. (string value) +# Deprecated group/name - [ldap]/tenant_enabled_emulation_dn +#project_enabled_emulation_dn= + +# Additional attribute mappings for projects. Attribute +# mapping format is :, where ldap_attr +# is the attribute in the LDAP entry and user_attr is the +# Identity API attribute. (list value) +# Deprecated group/name - [ldap]/tenant_additional_attribute_mapping +#project_additional_attribute_mapping= + +# Search base for roles. (string value) +#role_tree_dn= + +# LDAP search filter for roles. (string value) +#role_filter= + +# LDAP objectclass for roles. (string value) +#role_objectclass=organizationalRole + +# LDAP attribute mapped to role id. (string value) +#role_id_attribute=cn + +# LDAP attribute mapped to role name. (string value) +#role_name_attribute=ou + +# LDAP attribute mapped to role membership. (string value) +#role_member_attribute=roleOccupant + +# List of attributes stripped off the role on update. (list +# value) +#role_attribute_ignore= + +# Allow role creation in LDAP backend. (boolean value) +#role_allow_create=true + +# Allow role update in LDAP backend. (boolean value) +#role_allow_update=true + +# Allow role deletion in LDAP backend. (boolean value) +#role_allow_delete=true + +# Additional attribute mappings for roles. Attribute mapping +# format is :, where ldap_attr is the +# attribute in the LDAP entry and user_attr is the Identity +# API attribute. (list value) +#role_additional_attribute_mapping= + +# Search base for groups. (string value) +#group_tree_dn= + +# LDAP search filter for groups. (string value) +#group_filter= + +# LDAP objectclass for groups. (string value) +#group_objectclass=groupOfNames + +# LDAP attribute mapped to group id. (string value) +#group_id_attribute=cn + +# LDAP attribute mapped to group name. 
(string value) +#group_name_attribute=ou + +# LDAP attribute mapped to show group membership. (string +# value) +#group_member_attribute=member + +# LDAP attribute mapped to group description. (string value) +#group_desc_attribute=description + +# List of attributes stripped off the group on update. (list +# value) +#group_attribute_ignore= + +# Allow group creation in LDAP backend. (boolean value) +#group_allow_create=true + +# Allow group update in LDAP backend. (boolean value) +#group_allow_update=true + +# Allow group deletion in LDAP backend. (boolean value) +#group_allow_delete=true + +# Additional attribute mappings for groups. Attribute mapping +# format is :, where ldap_attr is the +# attribute in the LDAP entry and user_attr is the Identity +# API attribute. (list value) +#group_additional_attribute_mapping= + +# CA certificate file path for communicating with LDAP +# servers. (string value) +#tls_cacertfile= + +# CA certificate directory path for communicating with LDAP +# servers. (string value) +#tls_cacertdir= + +# Enable TLS for communicating with LDAP servers. (boolean +# value) +#use_tls=false + +# Valid options for tls_req_cert are demand, never, and allow. +# (string value) +#tls_req_cert=demand + +# Enable LDAP connection pooling. (boolean value) +#use_pool=false + +# Connection pool size. (integer value) +#pool_size=10 + +# Maximum count of reconnect trials. (integer value) +#pool_retry_max=3 + +# Time span in seconds to wait between two reconnect trials. +# (floating point value) +#pool_retry_delay=0.1 + +# Connector timeout in seconds. Value -1 indicates indefinite +# wait for response. (integer value) +#pool_connection_timeout=-1 + +# Connection lifetime in seconds. (integer value) +#pool_connection_lifetime=600 + +# Enable LDAP connection pooling for end user authentication. +# If use_pool is disabled, then this setting is meaningless +# and is not used at all. (boolean value) +#use_auth_pool=false + +# End user auth connection pool size. (integer value) +#auth_pool_size=100 + +# End user auth connection lifetime in seconds. (integer +# value) +#auth_pool_connection_lifetime=60 + + +[matchmaker_redis] + +# +# Options defined in oslo.messaging +# + +# Host to locate redis. (string value) +#host=127.0.0.1 + +# Use this port to connect to redis host. (integer value) +#port=6379 + +# Password for Redis server (optional). (string value) +#password= + + +[matchmaker_ring] + +# +# Options defined in oslo.messaging +# + +# Matchmaker ring file (JSON). (string value) +# Deprecated group/name - [DEFAULT]/matchmaker_ringfile +#ringfile=/etc/oslo/matchmaker_ring.json + + +[memcache] + +# +# Options defined in keystone +# + +# Memcache servers in the format of "host:port". (list value) +#servers=localhost:11211 + +# Number of seconds memcached server is considered dead before +# it is tried again. This is used by the key value store +# system (e.g. token pooled memcached persistence backend). +# (integer value) +#dead_retry=300 + +# Timeout in seconds for every call to a server. This is used +# by the key value store system (e.g. token pooled memcached +# persistence backend). (integer value) +#socket_timeout=3 + +# Max total number of open connections to every memcached +# server. This is used by the key value store system (e.g. +# token pooled memcached persistence backend). (integer value) +#pool_maxsize=10 + +# Number of seconds a connection to memcached is held unused +# in the pool before it is closed. This is used by the key +# value store system (e.g. 
token pooled memcached persistence +# backend). (integer value) +#pool_unused_timeout=60 + +# Number of seconds that an operation will wait to get a +# memcache client connection. This is used by the key value +# store system (e.g. token pooled memcached persistence +# backend). (integer value) +#pool_connection_get_timeout=10 + + +[oauth1] + +# +# Options defined in keystone +# + +# Credential backend driver. (string value) +#driver=keystone.contrib.oauth1.backends.sql.OAuth1 + +# Duration (in seconds) for the OAuth Request Token. (integer +# value) +#request_token_duration=28800 + +# Duration (in seconds) for the OAuth Access Token. (integer +# value) +#access_token_duration=86400 + + +[os_inherit] + +# +# Options defined in keystone +# + +# role-assignment inheritance to projects from owning domain +# can be optionally enabled. (boolean value) +#enabled=false + + +[paste_deploy] + +# +# Options defined in keystone +# + +# Name of the paste configuration file that defines the +# available pipelines. (string value) +#config_file=/usr/share/keystone/keystone-dist-paste.ini + + +[policy] + +# +# Options defined in keystone +# + +# Policy backend driver. (string value) +#driver=keystone.policy.backends.sql.Policy + +# Maximum number of entities that will be returned in a policy +# collection. (integer value) +#list_limit= + + +[revoke] + +# +# Options defined in keystone +# + +# An implementation of the backend for persisting revocation +# events. (string value) +#driver=keystone.contrib.revoke.backends.kvs.Revoke + +# This value (calculated in seconds) is added to token +# expiration before a revocation event may be removed from the +# backend. (integer value) +#expiration_buffer=1800 + +# Toggle for revocation event caching. This has no effect +# unless global caching is enabled. (boolean value) +#caching=true + + +[saml] + +# +# Options defined in keystone +# + +# Default TTL, in seconds, for any generated SAML assertion +# created by Keystone. (integer value) +#assertion_expiration_time=3600 + +# Binary to be called for XML signing. Install the appropriate +# package, specify absolute path or adjust your PATH +# environment variable if the binary cannot be found. (string +# value) +#xmlsec1_binary=xmlsec1 + +# Path of the certfile for SAML signing. For non-production +# environments, you may be interested in using `keystone- +# manage pki_setup` to generate self-signed certificates. +# Note, the path cannot contain a comma. (string value) +#certfile=/etc/keystone/ssl/certs/signing_cert.pem + +# Path of the keyfile for SAML signing. Note, the path cannot +# contain a comma. (string value) +#keyfile=/etc/keystone/ssl/private/signing_key.pem + +# Entity ID value for unique Identity Provider identification. +# Usually FQDN is set with a suffix. A value is required to +# generate IDP Metadata. For example: +# https://keystone.example.com/v3/OS-FEDERATION/saml2/idp +# (string value) +#idp_entity_id= + +# Identity Provider Single-Sign-On service value, required in +# the Identity Provider's metadata. A value is required to +# generate IDP Metadata. For example: +# https://keystone.example.com/v3/OS-FEDERATION/saml2/sso +# (string value) +#idp_sso_endpoint= + +# Language used by the organization. (string value) +#idp_lang=en + +# Organization name the installation belongs to. (string +# value) +#idp_organization_name= + +# Organization name to be displayed. (string value) +#idp_organization_display_name= + +# URL of the organization. 
(string value) +#idp_organization_url= + +# Company of contact person. (string value) +#idp_contact_company= + +# Given name of contact person (string value) +#idp_contact_name= + +# Surname of contact person. (string value) +#idp_contact_surname= + +# Email address of contact person. (string value) +#idp_contact_email= + +# Telephone number of contact person. (string value) +#idp_contact_telephone= + +# Contact type. Allowed values are: technical, support, +# administrative billing, and other (string value) +#idp_contact_type=other + +# Path to the Identity Provider Metadata file. This file +# should be generated with the keystone-manage +# saml_idp_metadata command. (string value) +#idp_metadata_path=/etc/keystone/saml2_idp_metadata.xml + + +[signing] + +# +# Options defined in keystone +# + +# Deprecated in favor of provider in the [token] section. +# (string value) +#token_format= + +# Path of the certfile for token signing. For non-production +# environments, you may be interested in using `keystone- +# manage pki_setup` to generate self-signed certificates. +# (string value) +#certfile=/etc/keystone/ssl/certs/signing_cert.pem + +# Path of the keyfile for token signing. (string value) +#keyfile=/etc/keystone/ssl/private/signing_key.pem + +# Path of the CA for token signing. (string value) +#ca_certs=/etc/keystone/ssl/certs/ca.pem + +# Path of the CA key for token signing. (string value) +#ca_key=/etc/keystone/ssl/private/cakey.pem + +# Key size (in bits) for token signing cert (auto generated +# certificate). (integer value) +#key_size=2048 + +# Days the token signing cert is valid for (auto generated +# certificate). (integer value) +#valid_days=3650 + +# Certificate subject (auto generated certificate) for token +# signing. (string value) +#cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com + + +[ssl] + +# +# Options defined in keystone +# + +# Toggle for SSL support on the Keystone eventlet servers. +# (boolean value) +#enable=false + +# Path of the certfile for SSL. For non-production +# environments, you may be interested in using `keystone- +# manage ssl_setup` to generate self-signed certificates. +# (string value) +#certfile=/etc/keystone/ssl/certs/keystone.pem + +# Path of the keyfile for SSL. (string value) +#keyfile=/etc/keystone/ssl/private/keystonekey.pem + +# Path of the ca cert file for SSL. (string value) +#ca_certs=/etc/keystone/ssl/certs/ca.pem + +# Path of the CA key file for SSL. (string value) +#ca_key=/etc/keystone/ssl/private/cakey.pem + +# Require client certificate. (boolean value) +#cert_required=false + +# SSL key length (in bits) (auto generated certificate). +# (integer value) +#key_size=1024 + +# Days the certificate is valid for once signed (auto +# generated certificate). (integer value) +#valid_days=3650 + +# SSL certificate subject (auto generated certificate). +# (string value) +#cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=localhost + + +[stats] + +# +# Options defined in keystone +# + +# Stats backend driver. (string value) +#driver=keystone.contrib.stats.backends.kvs.Stats + + +[token] + +# +# Options defined in keystone +# + +# External auth mechanisms that should add bind information to +# token, e.g., kerberos,x509. (list value) +#bind= + +# Enforcement policy on tokens presented to Keystone with bind +# information. One of disabled, permissive, strict, required +# or a specifically required bind mode, e.g., kerberos or x509 +# to require binding to that authentication. 
(string value) +#enforce_token_bind=permissive + +# Amount of time a token should remain valid (in seconds). +# (integer value) +#expiration=3600 + +# Controls the token construction, validation, and revocation +# operations. Core providers are +# "keystone.token.providers.[pkiz|pki|uuid].Provider". The +# default provider is uuid. (string value) +#provider= + +# Token persistence backend driver. (string value) +#driver=keystone.token.persistence.backends.sql.Token + +# Toggle for token system caching. This has no effect unless +# global caching is enabled. (boolean value) +#caching=true + +# Time to cache the revocation list and the revocation events +# if revoke extension is enabled (in seconds). This has no +# effect unless global and token caching are enabled. (integer +# value) +#revocation_cache_time=3600 + +# Time to cache tokens (in seconds). This has no effect unless +# global and token caching are enabled. (integer value) +#cache_time= + +# Revoke token by token identifier. Setting revoke_by_id to +# true enables various forms of enumerating tokens, e.g. `list +# tokens for user`. These enumerations are processed to +# determine the list of tokens to revoke. Only disable if you +# are switching to using the Revoke extension with a backend +# other than KVS, which stores events in memory. (boolean +# value) +#revoke_by_id=true + +# The hash algorithm to use for PKI tokens. This can be set to +# any algorithm that hashlib supports. WARNING: Before +# changing this value, the auth_token middleware must be +# configured with the hash_algorithms, otherwise token +# revocation will not be processed correctly. (string value) +#hash_algorithm=md5 + + +[trust] + +# +# Options defined in keystone +# + +# Delegation and impersonation features can be optionally +# disabled. (boolean value) +#enabled=true + +# Trust backend driver. 
(string value) +#driver=keystone.trust.backends.sql.Trust + + diff --git a/x/resources/keystone_config/templates/logging.conf b/x/resources/keystone_config/templates/logging.conf new file mode 100644 index 00000000..6cb8c425 --- /dev/null +++ b/x/resources/keystone_config/templates/logging.conf @@ -0,0 +1,65 @@ +[loggers] +keys=root,access + +[handlers] +keys=production,file,access_file,devel + +[formatters] +keys=minimal,normal,debug + + +########### +# Loggers # +########### + +[logger_root] +level=WARNING +handlers=file + +[logger_access] +level=INFO +qualname=access +handlers=access_file + + +################ +# Log Handlers # +################ + +[handler_production] +class=handlers.SysLogHandler +level=ERROR +formatter=normal +args=(('localhost', handlers.SYSLOG_UDP_PORT), handlers.SysLogHandler.LOG_USER) + +[handler_file] +class=handlers.WatchedFileHandler +level=WARNING +formatter=normal +args=('error.log',) + +[handler_access_file] +class=handlers.WatchedFileHandler +level=INFO +formatter=minimal +args=('access.log',) + +[handler_devel] +class=StreamHandler +level=NOTSET +formatter=debug +args=(sys.stdout,) + + +################## +# Log Formatters # +################## + +[formatter_minimal] +format=%(message)s + +[formatter_normal] +format=(%(name)s): %(asctime)s %(levelname)s %(message)s + +[formatter_debug] +format=(%(name)s): %(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s diff --git a/x/resources/keystone_config/templates/policy.json b/x/resources/keystone_config/templates/policy.json new file mode 100644 index 00000000..af65205e --- /dev/null +++ b/x/resources/keystone_config/templates/policy.json @@ -0,0 +1,171 @@ +{ + "admin_required": "role:admin or is_admin:1", + "service_role": "role:service", + "service_or_admin": "rule:admin_required or rule:service_role", + "owner" : "user_id:%(user_id)s", + "admin_or_owner": "rule:admin_required or rule:owner", + + "default": "rule:admin_required", + + "identity:get_region": "", + "identity:list_regions": "", + "identity:create_region": "rule:admin_required", + "identity:update_region": "rule:admin_required", + "identity:delete_region": "rule:admin_required", + + "identity:get_service": "rule:admin_required", + "identity:list_services": "rule:admin_required", + "identity:create_service": "rule:admin_required", + "identity:update_service": "rule:admin_required", + "identity:delete_service": "rule:admin_required", + + "identity:get_endpoint": "rule:admin_required", + "identity:list_endpoints": "rule:admin_required", + "identity:create_endpoint": "rule:admin_required", + "identity:update_endpoint": "rule:admin_required", + "identity:delete_endpoint": "rule:admin_required", + + "identity:get_domain": "rule:admin_required", + "identity:list_domains": "rule:admin_required", + "identity:create_domain": "rule:admin_required", + "identity:update_domain": "rule:admin_required", + "identity:delete_domain": "rule:admin_required", + + "identity:get_project": "rule:admin_required", + "identity:list_projects": "rule:admin_required", + "identity:list_user_projects": "rule:admin_or_owner", + "identity:create_project": "rule:admin_required", + "identity:update_project": "rule:admin_required", + "identity:delete_project": "rule:admin_required", + + "identity:get_user": "rule:admin_required", + "identity:list_users": "rule:admin_required", + "identity:create_user": "rule:admin_required", + "identity:update_user": "rule:admin_required", + "identity:delete_user": "rule:admin_required", + "identity:change_password": "rule:admin_or_owner", + 
+ "identity:get_group": "rule:admin_required", + "identity:list_groups": "rule:admin_required", + "identity:list_groups_for_user": "rule:admin_or_owner", + "identity:create_group": "rule:admin_required", + "identity:update_group": "rule:admin_required", + "identity:delete_group": "rule:admin_required", + "identity:list_users_in_group": "rule:admin_required", + "identity:remove_user_from_group": "rule:admin_required", + "identity:check_user_in_group": "rule:admin_required", + "identity:add_user_to_group": "rule:admin_required", + + "identity:get_credential": "rule:admin_required", + "identity:list_credentials": "rule:admin_required", + "identity:create_credential": "rule:admin_required", + "identity:update_credential": "rule:admin_required", + "identity:delete_credential": "rule:admin_required", + + "identity:ec2_get_credential": "rule:admin_or_owner", + "identity:ec2_list_credentials": "rule:admin_or_owner", + "identity:ec2_create_credential": "rule:admin_or_owner", + "identity:ec2_delete_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)", + + "identity:get_role": "rule:admin_required", + "identity:list_roles": "rule:admin_required", + "identity:create_role": "rule:admin_required", + "identity:update_role": "rule:admin_required", + "identity:delete_role": "rule:admin_required", + + "identity:check_grant": "rule:admin_required", + "identity:list_grants": "rule:admin_required", + "identity:create_grant": "rule:admin_required", + "identity:revoke_grant": "rule:admin_required", + + "identity:list_role_assignments": "rule:admin_required", + + "identity:get_policy": "rule:admin_required", + "identity:list_policies": "rule:admin_required", + "identity:create_policy": "rule:admin_required", + "identity:update_policy": "rule:admin_required", + "identity:delete_policy": "rule:admin_required", + + "identity:check_token": "rule:admin_required", + "identity:validate_token": "rule:service_or_admin", + "identity:validate_token_head": "rule:service_or_admin", + "identity:revocation_list": "rule:service_or_admin", + "identity:revoke_token": "rule:admin_or_owner", + + "identity:create_trust": "user_id:%(trust.trustor_user_id)s", + "identity:get_trust": "rule:admin_or_owner", + "identity:list_trusts": "", + "identity:list_roles_for_trust": "", + "identity:check_role_for_trust": "", + "identity:get_role_for_trust": "", + "identity:delete_trust": "", + + "identity:create_consumer": "rule:admin_required", + "identity:get_consumer": "rule:admin_required", + "identity:list_consumers": "rule:admin_required", + "identity:delete_consumer": "rule:admin_required", + "identity:update_consumer": "rule:admin_required", + + "identity:authorize_request_token": "rule:admin_required", + "identity:list_access_token_roles": "rule:admin_required", + "identity:get_access_token_role": "rule:admin_required", + "identity:list_access_tokens": "rule:admin_required", + "identity:get_access_token": "rule:admin_required", + "identity:delete_access_token": "rule:admin_required", + + "identity:list_projects_for_endpoint": "rule:admin_required", + "identity:add_endpoint_to_project": "rule:admin_required", + "identity:check_endpoint_in_project": "rule:admin_required", + "identity:list_endpoints_for_project": "rule:admin_required", + "identity:remove_endpoint_from_project": "rule:admin_required", + + "identity:create_endpoint_group": "rule:admin_required", + "identity:list_endpoint_groups": "rule:admin_required", + "identity:get_endpoint_group": "rule:admin_required", + 
"identity:update_endpoint_group": "rule:admin_required", + "identity:delete_endpoint_group": "rule:admin_required", + "identity:list_projects_associated_with_endpoint_group": "rule:admin_required", + "identity:list_endpoints_associated_with_endpoint_group": "rule:admin_required", + "identity:list_endpoint_groups_for_project": "rule:admin_required", + "identity:add_endpoint_group_to_project": "rule:admin_required", + "identity:remove_endpoint_group_from_project": "rule:admin_required", + + "identity:create_identity_provider": "rule:admin_required", + "identity:list_identity_providers": "rule:admin_required", + "identity:get_identity_providers": "rule:admin_required", + "identity:update_identity_provider": "rule:admin_required", + "identity:delete_identity_provider": "rule:admin_required", + + "identity:create_protocol": "rule:admin_required", + "identity:update_protocol": "rule:admin_required", + "identity:get_protocol": "rule:admin_required", + "identity:list_protocols": "rule:admin_required", + "identity:delete_protocol": "rule:admin_required", + + "identity:create_mapping": "rule:admin_required", + "identity:get_mapping": "rule:admin_required", + "identity:list_mappings": "rule:admin_required", + "identity:delete_mapping": "rule:admin_required", + "identity:update_mapping": "rule:admin_required", + + "identity:get_auth_catalog": "", + "identity:get_auth_projects": "", + "identity:get_auth_domains": "", + + "identity:list_projects_for_groups": "", + "identity:list_domains_for_groups": "", + + "identity:list_revoke_events": "", + + "identity:create_policy_association_for_endpoint": "rule:admin_required", + "identity:check_policy_association_for_endpoint": "rule:admin_required", + "identity:delete_policy_association_for_endpoint": "rule:admin_required", + "identity:create_policy_association_for_service": "rule:admin_required", + "identity:check_policy_association_for_service": "rule:admin_required", + "identity:delete_policy_association_for_service": "rule:admin_required", + "identity:create_policy_association_for_region_and_service": "rule:admin_required", + "identity:check_policy_association_for_region_and_service": "rule:admin_required", + "identity:delete_policy_association_for_region_and_service": "rule:admin_required", + "identity:get_policy_for_endpoint": "rule:admin_required", + "identity:list_endpoints_for_policy": "rule:admin_required" +} From cc8b0974e2fdf3a584f1f1ba75747932e158de10 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?= Date: Tue, 28 Apr 2015 09:59:33 +0000 Subject: [PATCH 66/87] Keystone user resource --- x/resources/keystone_user/actions/remove.yml | 6 ++++++ x/resources/keystone_user/actions/run.yml | 6 ++++++ x/resources/keystone_user/meta.yaml | 14 ++++++++++++++ 3 files changed, 26 insertions(+) create mode 100644 x/resources/keystone_user/actions/remove.yml create mode 100644 x/resources/keystone_user/actions/run.yml create mode 100644 x/resources/keystone_user/meta.yaml diff --git a/x/resources/keystone_user/actions/remove.yml b/x/resources/keystone_user/actions/remove.yml new file mode 100644 index 00000000..492749ef --- /dev/null +++ b/x/resources/keystone_user/actions/remove.yml @@ -0,0 +1,6 @@ +- hosts: [{{ ip }}] + sudo: yes + tasks: + - name: keystone user + - keystone_user: endpoint=http://{keystone_host}}:{{keystone_port}}/v2.0/ user={{user_name}} tenant={{tenant_name}} state=absent + - keystone_user: endpoint=http://{keystone_host}}:{{keystone_port}}/v2.0/ tenant={{tenant_name}} state=absent diff --git 
a/x/resources/keystone_user/actions/run.yml b/x/resources/keystone_user/actions/run.yml new file mode 100644 index 00000000..1a7a5469 --- /dev/null +++ b/x/resources/keystone_user/actions/run.yml @@ -0,0 +1,6 @@ +- hosts: [{{ ip }}] + sudo: yes + tasks: + - name: keystone user + - keystone_user: endpoint=http://{keystone_host}}:{{keystone_port}}/v2.0/ tenant={{tenant_name}} state=present + - keystone_user: endpoint=http://{keystone_host}}:{{keystone_port}}/v2.0/ user={{user_name}} password={{user_password}} tenant={{tenant_name}} state=present diff --git a/x/resources/keystone_user/meta.yaml b/x/resources/keystone_user/meta.yaml new file mode 100644 index 00000000..6293937c --- /dev/null +++ b/x/resources/keystone_user/meta.yaml @@ -0,0 +1,14 @@ +id: keystone_user +handler: ansible +version: 1.0.0 +input: + keystone_host: + keystone_port: + login_user: + login_token: + user_name: + user_password: + tenant_name: + ip: + ssh_key: + ssh_user: From a64be505ee0ce2944673caf4f664f62da79ce93d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?= Date: Tue, 28 Apr 2015 12:23:46 +0000 Subject: [PATCH 67/87] Rename mariadb resource to mariadb_service --- x/resources/{mariadb => mariadb_service}/actions/remove.yml | 0 x/resources/{mariadb => mariadb_service}/actions/run.yml | 0 x/resources/{mariadb => mariadb_service}/meta.yaml | 0 3 files changed, 0 insertions(+), 0 deletions(-) rename x/resources/{mariadb => mariadb_service}/actions/remove.yml (100%) rename x/resources/{mariadb => mariadb_service}/actions/run.yml (100%) rename x/resources/{mariadb => mariadb_service}/meta.yaml (100%) diff --git a/x/resources/mariadb/actions/remove.yml b/x/resources/mariadb_service/actions/remove.yml similarity index 100% rename from x/resources/mariadb/actions/remove.yml rename to x/resources/mariadb_service/actions/remove.yml diff --git a/x/resources/mariadb/actions/run.yml b/x/resources/mariadb_service/actions/run.yml similarity index 100% rename from x/resources/mariadb/actions/run.yml rename to x/resources/mariadb_service/actions/run.yml diff --git a/x/resources/mariadb/meta.yaml b/x/resources/mariadb_service/meta.yaml similarity index 100% rename from x/resources/mariadb/meta.yaml rename to x/resources/mariadb_service/meta.yaml From f6f04df7b84bb97377dc6a6094dfeecf2c413c04 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?= Date: Tue, 28 Apr 2015 17:01:54 +0000 Subject: [PATCH 68/87] Add db name to maria_db resource and use it instead of resource name --- x/resources/mariadb_db/actions/remove.yml | 2 +- x/resources/mariadb_db/actions/run.yml | 2 +- x/resources/mariadb_db/meta.yaml | 1 + 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/x/resources/mariadb_db/actions/remove.yml b/x/resources/mariadb_db/actions/remove.yml index fe6d6488..594061a4 100644 --- a/x/resources/mariadb_db/actions/remove.yml +++ b/x/resources/mariadb_db/actions/remove.yml @@ -3,7 +3,7 @@ tasks: - name: mariadb db mysql_db: - name: {{name}} + name: {{db_name}} state: absent login_user: root login_password: {{login_password}} diff --git a/x/resources/mariadb_db/actions/run.yml b/x/resources/mariadb_db/actions/run.yml index fda96b5b..0efb73ed 100644 --- a/x/resources/mariadb_db/actions/run.yml +++ b/x/resources/mariadb_db/actions/run.yml @@ -3,7 +3,7 @@ tasks: - name: mariadb db mysql_db: - name: {{name}} + name: {{db_name}} state: present login_user: root login_password: {{login_password}} diff --git a/x/resources/mariadb_db/meta.yaml b/x/resources/mariadb_db/meta.yaml index 6bd49b7d..609814ca 100644 
--- a/x/resources/mariadb_db/meta.yaml +++ b/x/resources/mariadb_db/meta.yaml @@ -5,6 +5,7 @@ actions: run: run.yml remove: remove.yml input: + db_name: login_password: login_port: login_user: From 6fd1a69dd9b41281a6a661038c502272a72d764c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?= Date: Tue, 28 Apr 2015 17:06:26 +0000 Subject: [PATCH 69/87] Add user name to maria_db_user resource and use it instead of resource name --- x/resources/mariadb_user/actions/remove.yml | 2 +- x/resources/mariadb_user/actions/run.yml | 6 +++--- x/resources/mariadb_user/meta.yaml | 5 +++-- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/x/resources/mariadb_user/actions/remove.yml b/x/resources/mariadb_user/actions/remove.yml index 9df0be6c..7f6939d4 100644 --- a/x/resources/mariadb_user/actions/remove.yml +++ b/x/resources/mariadb_user/actions/remove.yml @@ -3,7 +3,7 @@ tasks: - name: mariadb user mysql_user: - name: {{name}} + name: {{new_user_name}} state: absent login_user: root login_password: {{login_password}} diff --git a/x/resources/mariadb_user/actions/run.yml b/x/resources/mariadb_user/actions/run.yml index 98beea11..b0981803 100644 --- a/x/resources/mariadb_user/actions/run.yml +++ b/x/resources/mariadb_user/actions/run.yml @@ -3,9 +3,9 @@ tasks: - name: mariadb user mysql_user: - name: {{name}} - password: {{password}} - priv: {{db}}.*:ALL + name: {{new_user_name}} + password: {{new_user_password}} + priv: {{db_name}}.*:ALL host: '%' state: present login_user: root diff --git a/x/resources/mariadb_user/meta.yaml b/x/resources/mariadb_user/meta.yaml index 2d5a34aa..b45f8e47 100644 --- a/x/resources/mariadb_user/meta.yaml +++ b/x/resources/mariadb_user/meta.yaml @@ -5,8 +5,9 @@ actions: run: run.yml remove: remove.yml input: - password: - db: + new_user_password: + new_user_name: + db_name: login_password: login_port: login_user: From 1ac5d6342326dc5eece1e91394cb5d4a893c68aa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?= Date: Wed, 29 Apr 2015 08:13:32 +0000 Subject: [PATCH 70/87] Add command to keyston service resource and run it in host network --- x/resources/keystone_service/actions/run.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/x/resources/keystone_service/actions/run.yml b/x/resources/keystone_service/actions/run.yml index 7eb81109..c3902aab 100644 --- a/x/resources/keystone_service/actions/run.yml +++ b/x/resources/keystone_service/actions/run.yml @@ -3,6 +3,7 @@ tasks: - name: keystone container docker: + command: /bin/bash -c "keystone-manage db_sync && /usr/bin/keystone-all" name: {{ name }} image: {{ image }} state: running @@ -11,3 +12,4 @@ - {{ admin_port }}:35357 volumes: - {{ config_dir }}:/etc/keystone + net: host From 64c0ff9edaf52903635bd0ed2f174a84f001a809 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?= Date: Wed, 29 Apr 2015 08:14:09 +0000 Subject: [PATCH 71/87] Remove not needed code from haproxy_config resource --- x/resources/haproxy_config/actions/remove.yml | 6 ------ x/resources/haproxy_config/actions/run.yml | 6 ------ x/resources/haproxy_config/meta.yaml | 2 +- 3 files changed, 1 insertion(+), 13 deletions(-) delete mode 100644 x/resources/haproxy_config/actions/remove.yml delete mode 100644 x/resources/haproxy_config/actions/run.yml diff --git a/x/resources/haproxy_config/actions/remove.yml b/x/resources/haproxy_config/actions/remove.yml deleted file mode 100644 index 76142acf..00000000 --- a/x/resources/haproxy_config/actions/remove.yml +++ /dev/null @@ -1,6 +0,0 @@ -# TODO -- hosts: [{{ 
ip }}] - sudo: yes - tasks: - - shell: docker stop {{ name }} - - shell: docker rm {{ name }} diff --git a/x/resources/haproxy_config/actions/run.yml b/x/resources/haproxy_config/actions/run.yml deleted file mode 100644 index e223fe8f..00000000 --- a/x/resources/haproxy_config/actions/run.yml +++ /dev/null @@ -1,6 +0,0 @@ -# TODO -- hosts: [{{ ip }}] - sudo: yes - tasks: - - shell: docker run -d --net="host" --privileged \ - --name {{ name }} {{ image }} diff --git a/x/resources/haproxy_config/meta.yaml b/x/resources/haproxy_config/meta.yaml index fe685683..a7584600 100644 --- a/x/resources/haproxy_config/meta.yaml +++ b/x/resources/haproxy_config/meta.yaml @@ -1,5 +1,5 @@ id: haproxy_config -handler: ansible +handler: none version: 1.0.0 input: name: From 784230f47c9e9ed4628f6d739faa50ba819a5430 Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Wed, 29 Apr 2015 10:38:58 +0200 Subject: [PATCH 72/87] Fix resource error message for not existing base resource --- x/resource.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x/resource.py b/x/resource.py index f6d0d949..bd5b6e32 100644 --- a/x/resource.py +++ b/x/resource.py @@ -118,7 +118,7 @@ class Resource(object): def create(name, base_path, dest_path, args, connections={}): if not os.path.exists(base_path): - raise Exception('Base resource does not exist: {0}'.format(dest_path)) + raise Exception('Base resource does not exist: {0}'.format(base_path)) if not os.path.exists(dest_path): raise Exception('Dest dir does not exist: {0}'.format(dest_path)) if not os.path.isdir(dest_path): From c3711cd7f02b0a3b44a67ba21f0d5119f5420731 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?= Date: Wed, 29 Apr 2015 09:49:41 +0000 Subject: [PATCH 73/87] Use absolute path for resource base dir --- x/resource.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x/resource.py b/x/resource.py index bd5b6e32..437b5f32 100644 --- a/x/resource.py +++ b/x/resource.py @@ -124,7 +124,7 @@ def create(name, base_path, dest_path, args, connections={}): if not os.path.isdir(dest_path): raise Exception('Dest path is not a directory: {0}'.format(dest_path)) - dest_path = os.path.join(dest_path, name) + dest_path = os.path.abspath(os.path.join(dest_path, name)) base_meta_file = os.path.join(base_path, 'meta.yaml') actions_path = os.path.join(base_path, 'actions') From 983a964a51b7601c1300cdaf6332c097cf54b96d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?= Date: Wed, 29 Apr 2015 09:50:05 +0000 Subject: [PATCH 74/87] Fix haproxy config removal --- x/resources/haproxy/actions/remove.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/x/resources/haproxy/actions/remove.yml b/x/resources/haproxy/actions/remove.yml index 76142acf..f6c1f6e5 100644 --- a/x/resources/haproxy/actions/remove.yml +++ b/x/resources/haproxy/actions/remove.yml @@ -2,5 +2,4 @@ - hosts: [{{ ip }}] sudo: yes tasks: - - shell: docker stop {{ name }} - - shell: docker rm {{ name }} + - file: path={{ config_dir.value['src'] }} state=absent From bda801f4ca665feac40169f5e1abbdd942882179 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?= Date: Wed, 29 Apr 2015 09:56:40 +0000 Subject: [PATCH 75/87] Remove solar-de[5,6] --- Vagrantfile | 23 ----------------------- 1 file changed, 23 deletions(-) diff --git a/Vagrantfile b/Vagrantfile index bf248c30..b1daf6e2 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -60,27 +60,4 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| v.name = "solar-dev4" end end - 
- config.vm.define "solar-dev5" do |guest5| - guest5.vm.provision "shell", inline: init_script, privileged: true - guest5.vm.network "private_network", ip: "10.0.0.6" - guest5.vm.host_name = "solar-dev5" - - guest5.vm.provider :virtualbox do |v| - v.customize ["modifyvm", :id, "--memory", 256] - v.name = "solar-dev5" - end - end - - config.vm.define "solar-dev6" do |guest6| - guest6.vm.provision "shell", inline: init_script, privileged: true - guest6.vm.network "private_network", ip: "10.0.0.7" - guest6.vm.host_name = "solar-dev6" - - guest6.vm.provider :virtualbox do |v| - v.customize ["modifyvm", :id, "--memory", 256] - v.name = "solar-dev6" - end - end - end From adb631089aeae99b9bf37dbe76eec425793711e6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?= Date: Wed, 29 Apr 2015 09:57:10 +0000 Subject: [PATCH 76/87] Install docker-py on all dev vms --- main.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/main.yml b/main.yml index b5673080..6b9c7074 100644 --- a/main.yml +++ b/main.yml @@ -11,6 +11,8 @@ - apt: name=virtualenvwrapper state=present - apt: name=ipython state=present - apt: name=python-pudb state=present + - apt: name=python-pip state=present + - shell: pip install docker-py # requirements - shell: pip install -r /vagrant/requirements.txt From 50c76eed9d7b57a9d07dfd21711f99f7988b720e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?= Date: Wed, 29 Apr 2015 09:57:52 +0000 Subject: [PATCH 77/87] Remove not resource related tasks from action --- x/resources/haproxy/actions/run.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/x/resources/haproxy/actions/run.yml b/x/resources/haproxy/actions/run.yml index 4e71c532..8b112a09 100644 --- a/x/resources/haproxy/actions/run.yml +++ b/x/resources/haproxy/actions/run.yml @@ -16,9 +16,6 @@ {% endfor %} {% endfor %} tasks: - - apt: name=python-pip state=present - - shell: pip install docker-py - - service: name=docker state=started - file: path={{ config_dir.value['src'] }}/ state=directory - file: path={{ config_dir.value['src'] }}/haproxy.cfg state=touch - template: src=/vagrant/haproxy.cfg dest={{ config_dir.value['src'] }}/haproxy.cfg From 8514f1492d3a283e712b2fdb0c2fb6af04c32abe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?= Date: Wed, 29 Apr 2015 09:59:33 +0000 Subject: [PATCH 78/87] Install python-mysqldb on all nodes --- main.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/main.yml b/main.yml index 6b9c7074..3ef009ba 100644 --- a/main.yml +++ b/main.yml @@ -12,6 +12,7 @@ - apt: name=ipython state=present - apt: name=python-pudb state=present - apt: name=python-pip state=present + - apt: name=python-mysqldb state=present - shell: pip install docker-py # requirements From abd21d407127377b96b1c3ed9f9df495c288160d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?= Date: Wed, 29 Apr 2015 17:17:24 +0200 Subject: [PATCH 79/87] Use docker-py version 1.1.0 --- main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/main.yml b/main.yml index 3ef009ba..7dc87a07 100644 --- a/main.yml +++ b/main.yml @@ -13,14 +13,14 @@ - apt: name=python-pudb state=present - apt: name=python-pip state=present - apt: name=python-mysqldb state=present - - shell: pip install docker-py + - shell: pip install docker-py==1.1.0 # requirements - shell: pip install -r /vagrant/requirements.txt # Graph drawing #- apt: name=python-matplotlib state=present - - apt: name=python-graphviz state=present + - apt: name=python-pygraphviz state=present # Setup development 
env for solar #- shell: python setup.py develop chdir=/vagrant/solar From 1268095b5857440fb66833f2b677dede501a8dbc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?= Date: Wed, 29 Apr 2015 18:13:00 +0200 Subject: [PATCH 80/87] Increase memory for vms. Mariadb needs 256 MB for internal use --- Vagrantfile | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/Vagrantfile b/Vagrantfile index b1daf6e2..1d12b1ba 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -13,7 +13,8 @@ SCRIPT Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| - config.vm.box = "rustyrobot/deb-jessie-amd64" + config.vm.box = "deb/jessie-amd64" + #rustyrobot/deb-jessie-amd64" config.vm.define "solar-dev", primary: true do |guest1| guest1.vm.provision "shell", inline: init_script, privileged: true @@ -34,7 +35,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| guest2.vm.host_name = "solar-dev2" guest2.vm.provider :virtualbox do |v| - v.customize ["modifyvm", :id, "--memory", 256] + v.customize ["modifyvm", :id, "--memory", 1024] v.name = "solar-dev2" end end @@ -45,7 +46,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| guest3.vm.host_name = "solar-dev3" guest3.vm.provider :virtualbox do |v| - v.customize ["modifyvm", :id, "--memory", 256] + v.customize ["modifyvm", :id, "--memory", 1024] v.name = "solar-dev3" end end @@ -56,7 +57,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| guest4.vm.host_name = "solar-dev4" guest4.vm.provider :virtualbox do |v| - v.customize ["modifyvm", :id, "--memory", 256] + v.customize ["modifyvm", :id, "--memory", 1024] v.name = "solar-dev4" end end From e19d1511ac730e7e4a8651815fcebe7734547a87 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?= Date: Wed, 29 Apr 2015 18:13:38 +0200 Subject: [PATCH 81/87] Expose IPs from keystone service. Host mode not needed anymore --- x/resources/keystone_service/actions/run.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/x/resources/keystone_service/actions/run.yml b/x/resources/keystone_service/actions/run.yml index c3902aab..305ff7e1 100644 --- a/x/resources/keystone_service/actions/run.yml +++ b/x/resources/keystone_service/actions/run.yml @@ -7,9 +7,11 @@ name: {{ name }} image: {{ image }} state: running + expose: + - 5000 + - 35357 ports: - {{ port }}:5000 - {{ admin_port }}:35357 volumes: - {{ config_dir }}:/etc/keystone - net: host From ed98ae1d99df97296db86b42ad7650af63f2bda8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?= Date: Wed, 29 Apr 2015 18:14:11 +0200 Subject: [PATCH 82/87] Run only resource specific tasks. 
Use host net mode by default --- x/resources/docker_container/actions/run.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/x/resources/docker_container/actions/run.yml b/x/resources/docker_container/actions/run.yml index a04d1a30..9156b900 100644 --- a/x/resources/docker_container/actions/run.yml +++ b/x/resources/docker_container/actions/run.yml @@ -2,13 +2,11 @@ - hosts: [{{ ip }}] sudo: yes tasks: - - apt: name=python-pip state=present - - shell: pip install docker-py - - service: name=docker state=started - docker: name: {{ name }} image: {{ image }} state: running + net: host ports: {% for port in ports.value %} - {{ port['value'] }}:{{ port['value'] }} From d8f000c8d87f1448754d035f0436c9f870c61283 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?= Date: Wed, 29 Apr 2015 18:16:42 +0200 Subject: [PATCH 83/87] Example deployement --- example.py | 92 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 92 insertions(+) create mode 100644 example.py diff --git a/example.py b/example.py new file mode 100644 index 00000000..fe9bed4c --- /dev/null +++ b/example.py @@ -0,0 +1,92 @@ +import shutil +import os + +from x import resource +from x import signals + + +signals.Connections.clear() + +if os.path.exists('rs'): + shutil.rmtree('rs') +os.mkdir('rs') + +node1 = resource.create('node1', 'x/resources/ro_node/', 'rs/', {'ip':'10.0.0.3', 'ssh_key' : '/vagrant/tmp/keys/ssh_private', 'ssh_user':'vagrant'}) +node1 = resource.create('node1', 'x/resources/ro_node/', 'rs/', {'ip':'10.0.0.4', 'ssh_key' : '/vagrant/tmp/keys/ssh_private', 'ssh_user':'vagrant'}) +node1 = resource.create('node1', 'x/resources/ro_node/', 'rs/', {'ip':'10.0.0.5', 'ssh_key' : '/vagrant/tmp/keys/ssh_private', 'ssh_user':'vagrant'}) + +mariadb_service1 = resource.create('mariadb_service1', 'x/resources/mariadb_service', 'rs/', {'image':'mariadb', 'root_password' : 'mariadb', 'port' : '3306', 'ip':'', 'ssh_user':'', 'ssh_key':''}) +keystone_db = resource.create('keystone_db', 'x/resources/mariadb_db/', 'rs/', {'db_name':'keystone_db', 'login_password':'', 'login_user':'root', 'login_port': '', 'ip':'', 'ssh_user':'', 'ssh_key':''}) +keystone_db_user = resource.create('keystone_db_user', 'x/resources/mariadb_user/', 'rs/', {'new_user_name' : 'keystone', 'new_user_password' : 'keystone', 'db_name':'', 'login_password':'', 'login_user':'root', 'login_port': '', 'ip':'', 'ssh_user':'', 'ssh_key':''}) + +keystone_config1 = resource.create('keystone_config1', 'x/resources/keystone_config/', 'rs/', {'config_dir' : '/etc/solar/keystone', 'ip':'', 'ssh_user':'', 'ssh_key':'', 'admin_token':'admin', 'db_password':'', 'db_name':'', 'db_user':'', 'db_host':''}) +keystone_service1 = resource.create('keystone_service1', 'x/resources/keystone_service/', 'rs/', {'port':'5000', 'admin_port':'35357', 'ip':'', 'ssh_key':'', 'ssh_user':'', 'config_dir':'', 'config_dir':''}) + +keystone_config2 = resource.create('keystone_config1', 'x/resources/keystone_config/', 'rs/', {'config_dir' : '/etc/solar/keystone', 'ip':'', 'ssh_user':'', 'ssh_key':'', 'admin_token':'admin', 'db_password':'', 'db_name':'', 'db_user':'', 'db_host':''}) +keystone_service2 = resource.create('keystone_service1', 'x/resources/keystone_service/', 'rs/', {'port':'5000', 'admin_port':'35357', 'ip':'', 'ssh_key':'', 'ssh_user':'', 'config_dir':'', 'config_dir':''}) + + +haproxy_keystone_config = resource.create('haproxy_keystone1_config', 'x/resources/haproxy_config/', 'rs/', {'name':'keystone_config', 'listen_port':'5000', 
'servers':[], 'ports':[]}) +haproxy_config = resource.create('haproxy_config', 'x/resources/haproxy', 'rs/', {'ip':'', 'ssh_key':'', 'ssh_user':'', 'configs_names':[], 'configs_ports':[], 'listen_ports':[], 'configs':[]}) +haproxy_service = resource.create('haproxy_service', 'x/resources/docker_container//', 'rs/', {'image' : 'tutum/haproxy', 'ports': [], 'host_binds': [], 'volume_binds':[], 'ip':'', 'ssh_key':'', 'ssh_user':''}) + + +#### +# connections +#### + +#mariadb +signals.connect(node1, mariadb_service1) + +#keystone db +signals.connect(node1, keystone_db) +signals.connect(mariadb_service1, keystone_db, {'root_password':'login_password', 'port':'login_port'}) + +# keystone_db_user +signals.connect(node1, keystone_db_user) +signals.connect(mariadb_service1, keystone_db_user, {'root_password':'login_password', 'port':'login_port'}) +signals.connect(keystone_db, keystone_db_user, {'db_name':'db_name'}) + +signals.connect(node1, keystone_config1) +signals.connect(mariadb_service1, keystone_config1, {'ip':'db_host'}) +signals.connect(keystone_db_user, keystone_config1, {'db_name':'db_name', 'new_user_name':'db_user', 'new_user_password':'db_password'}) + +signals.connect(node1, keystone_service1) +signals.connect(keystone_config1, keystone_service1, {'config_dir': 'config_dir'}) + +signals.connect(node2, keystone_config2) +signals.connect(mariadb_service1, keystone_config2, {'ip':'db_host'}) +signals.connect(keystone_db_user, keystone_config2, {'db_name':'db_name', 'new_user_name':'db_user', 'new_user_password':'db_password'}) + +signals.connect(node2, keystone_service2) +signals.connect(keystone_config2, keystone_service2, {'config_dir': 'config_dir'}) + +signals.connect(keystone_service1, haproxy_keystone_config, {'ip':'servers', 'port':'ports'}) + +signals.connect(node1, haproxy_config) +signals.connect(haproxy_keystone_config, haproxy_config, {'listen_port': 'listen_ports', 'name':'configs_names', 'ports' : 'configs_ports', 'servers':'configs'}) + +signals.connect(node1, haproxy_service) +signals.connect(haproxy_config, haproxy_service, {'listen_ports':'ports', 'config_dir':'host_binds'}) + + +#run +from x import actions + +actions.resource_action(mariadb_service1, 'run') +actions.resource_action(keystone_db, 'run') +actions.resource_action(keystone_db_user, 'run') +actions.resource_action(keystone_config1, 'run') +actions.resource_action(keystone_service1, 'run') +actions.resource_action(haproxy_config, 'run') +actions.resource_action(haproxy_service, 'run') + + +#remove +actions.resource_action(haproxy_service, 'remove') +actions.resource_action(haproxy_config, 'remove') +actions.resource_action(keystone_service1, 'remove') +actions.resource_action(keystone_config1, 'remove') +actions.resource_action(keystone_db_user, 'remove') +actions.resource_action(keystone_db, 'remove') +actions.resource_action(mariadb_service1, 'remove') From 88016985a1b13fac0d35b3eaf87fa7d54f38d2e5 Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Mon, 4 May 2015 15:51:28 +0200 Subject: [PATCH 84/87] TODO updated --- TODO.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/TODO.md b/TODO.md index 00fb9923..f52e48a1 100644 --- a/TODO.md +++ b/TODO.md @@ -2,15 +2,15 @@ - store all resource configurations somewhere globally (this is required to correctly perform an update on one resource and bubble down to all others) -- ansible handler (loles) - config templates -- Deploy HAProxy, Keystone and MariaDB - Handler also can require some data, for example ansible: ip, ssh_key, ssh_user 
- tag-filtered graph generation - separate resource for docker image -- this is e.g. to make automatic image removal when some image is unused to conserve space # DONE +- Deploy HAProxy, Keystone and MariaDB +- ansible handler (loles) - tags are kept in resource mata file (pkaminski) - add 'list' connection type (pkaminski) - connections are made automaticly(pkaminski) From 5383634f5fba1affd5d71f44f50ec10bd6ccb9f9 Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Fri, 8 May 2015 11:34:56 +0200 Subject: [PATCH 85/87] example.py fixes --- example.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/example.py b/example.py index fe9bed4c..2cb48565 100644 --- a/example.py +++ b/example.py @@ -12,18 +12,18 @@ if os.path.exists('rs'): os.mkdir('rs') node1 = resource.create('node1', 'x/resources/ro_node/', 'rs/', {'ip':'10.0.0.3', 'ssh_key' : '/vagrant/tmp/keys/ssh_private', 'ssh_user':'vagrant'}) -node1 = resource.create('node1', 'x/resources/ro_node/', 'rs/', {'ip':'10.0.0.4', 'ssh_key' : '/vagrant/tmp/keys/ssh_private', 'ssh_user':'vagrant'}) -node1 = resource.create('node1', 'x/resources/ro_node/', 'rs/', {'ip':'10.0.0.5', 'ssh_key' : '/vagrant/tmp/keys/ssh_private', 'ssh_user':'vagrant'}) +node2 = resource.create('node2', 'x/resources/ro_node/', 'rs/', {'ip':'10.0.0.4', 'ssh_key' : '/vagrant/tmp/keys/ssh_private', 'ssh_user':'vagrant'}) +node3 = resource.create('node3', 'x/resources/ro_node/', 'rs/', {'ip':'10.0.0.5', 'ssh_key' : '/vagrant/tmp/keys/ssh_private', 'ssh_user':'vagrant'}) -mariadb_service1 = resource.create('mariadb_service1', 'x/resources/mariadb_service', 'rs/', {'image':'mariadb', 'root_password' : 'mariadb', 'port' : '3306', 'ip':'', 'ssh_user':'', 'ssh_key':''}) +mariadb_service1 = resource.create('mariadb_service1', 'x/resources/mariadb_service', 'rs/', {'image':'mariadb', 'root_password' : 'mariadb', 'port' : '3306', 'ip': '', 'ssh_user': '', 'ssh_key': ''}) keystone_db = resource.create('keystone_db', 'x/resources/mariadb_db/', 'rs/', {'db_name':'keystone_db', 'login_password':'', 'login_user':'root', 'login_port': '', 'ip':'', 'ssh_user':'', 'ssh_key':''}) keystone_db_user = resource.create('keystone_db_user', 'x/resources/mariadb_user/', 'rs/', {'new_user_name' : 'keystone', 'new_user_password' : 'keystone', 'db_name':'', 'login_password':'', 'login_user':'root', 'login_port': '', 'ip':'', 'ssh_user':'', 'ssh_key':''}) keystone_config1 = resource.create('keystone_config1', 'x/resources/keystone_config/', 'rs/', {'config_dir' : '/etc/solar/keystone', 'ip':'', 'ssh_user':'', 'ssh_key':'', 'admin_token':'admin', 'db_password':'', 'db_name':'', 'db_user':'', 'db_host':''}) keystone_service1 = resource.create('keystone_service1', 'x/resources/keystone_service/', 'rs/', {'port':'5000', 'admin_port':'35357', 'ip':'', 'ssh_key':'', 'ssh_user':'', 'config_dir':'', 'config_dir':''}) -keystone_config2 = resource.create('keystone_config1', 'x/resources/keystone_config/', 'rs/', {'config_dir' : '/etc/solar/keystone', 'ip':'', 'ssh_user':'', 'ssh_key':'', 'admin_token':'admin', 'db_password':'', 'db_name':'', 'db_user':'', 'db_host':''}) -keystone_service2 = resource.create('keystone_service1', 'x/resources/keystone_service/', 'rs/', {'port':'5000', 'admin_port':'35357', 'ip':'', 'ssh_key':'', 'ssh_user':'', 'config_dir':'', 'config_dir':''}) +keystone_config2 = resource.create('keystone_config2', 'x/resources/keystone_config/', 'rs/', {'config_dir' : '/etc/solar/keystone', 'ip':'', 'ssh_user':'', 'ssh_key':'', 'admin_token':'admin', 
'db_password':'', 'db_name':'', 'db_user':'', 'db_host':''}) +keystone_service2 = resource.create('keystone_service2', 'x/resources/keystone_service/', 'rs/', {'port':'5000', 'admin_port':'35357', 'ip':'', 'ssh_key':'', 'ssh_user':'', 'config_dir':'', 'config_dir':''}) haproxy_keystone_config = resource.create('haproxy_keystone1_config', 'x/resources/haproxy_config/', 'rs/', {'name':'keystone_config', 'listen_port':'5000', 'servers':[], 'ports':[]}) From e0f47188810e1aff6e4b42fe1b059f8b360a349e Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Mon, 11 May 2015 09:30:46 +0200 Subject: [PATCH 86/87] example.py fix --- example.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/example.py b/example.py index 2cb48565..2b499497 100644 --- a/example.py +++ b/example.py @@ -28,7 +28,7 @@ keystone_service2 = resource.create('keystone_service2', 'x/resources/keystone_s haproxy_keystone_config = resource.create('haproxy_keystone1_config', 'x/resources/haproxy_config/', 'rs/', {'name':'keystone_config', 'listen_port':'5000', 'servers':[], 'ports':[]}) haproxy_config = resource.create('haproxy_config', 'x/resources/haproxy', 'rs/', {'ip':'', 'ssh_key':'', 'ssh_user':'', 'configs_names':[], 'configs_ports':[], 'listen_ports':[], 'configs':[]}) -haproxy_service = resource.create('haproxy_service', 'x/resources/docker_container//', 'rs/', {'image' : 'tutum/haproxy', 'ports': [], 'host_binds': [], 'volume_binds':[], 'ip':'', 'ssh_key':'', 'ssh_user':''}) +haproxy_service = resource.create('haproxy_service', 'x/resources/docker_container/', 'rs/', {'image' : 'tutum/haproxy', 'ports': [], 'host_binds': [], 'volume_binds':[], 'ip':'', 'ssh_key':'', 'ssh_user':''}) #### From f4e24486533557453ea049897ee1e8876417ac20 Mon Sep 17 00:00:00 2001 From: Przemyslaw Kaminski Date: Mon, 11 May 2015 09:35:39 +0200 Subject: [PATCH 87/87] README, TODO moved to x --- README.md => x/README.md | 0 TODO.md => x/TODO.md | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename README.md => x/README.md (100%) rename TODO.md => x/TODO.md (100%) diff --git a/README.md b/x/README.md similarity index 100% rename from README.md rename to x/README.md diff --git a/TODO.md b/x/TODO.md similarity index 100% rename from TODO.md rename to x/TODO.md