Browse Source

Plugins split from sahara core

Change-Id: I43e0beec6508f93a436a150749bfa23571986b9d
changes/83/629183/1
Telles Nobrega 3 years ago
committed by Telles Nobrega
parent
commit
0637de5fdb
  1. 2
      .stestr.conf
  2. 35
      README.rst
  3. 9
      doc/requirements.txt
  4. 34
      errors.txt
  5. 162
      lower-constraints.txt
  6. 1
      requirements.txt
  7. 0
      sahara_plugin_vanilla/__init__.py
  8. 0
      sahara_plugin_vanilla/i18n.py
  9. 0
      sahara_plugin_vanilla/plugins/__init__.py
  10. 0
      sahara_plugin_vanilla/plugins/vanilla/__init__.py
  11. 0
      sahara_plugin_vanilla/plugins/vanilla/abstractversionhandler.py
  12. 12
      sahara_plugin_vanilla/plugins/vanilla/confighints_helper.py
  13. 6
      sahara_plugin_vanilla/plugins/vanilla/edp_engine.py
  14. 0
      sahara_plugin_vanilla/plugins/vanilla/hadoop2/__init__.py
  15. 84
      sahara_plugin_vanilla/plugins/vanilla/hadoop2/config.py
  16. 27
      sahara_plugin_vanilla/plugins/vanilla/hadoop2/config_helper.py
  17. 6
      sahara_plugin_vanilla/plugins/vanilla/hadoop2/edp_engine.py
  18. 14
      sahara_plugin_vanilla/plugins/vanilla/hadoop2/keypairs.py
  19. 2
      sahara_plugin_vanilla/plugins/vanilla/hadoop2/oozie_helper.py
  20. 0
      sahara_plugin_vanilla/plugins/vanilla/hadoop2/recommendations_utils.py
  21. 0
      sahara_plugin_vanilla/plugins/vanilla/hadoop2/resources/create_oozie_db.sql
  22. 0
      sahara_plugin_vanilla/plugins/vanilla/hadoop2/resources/post_conf.template
  23. 0
      sahara_plugin_vanilla/plugins/vanilla/hadoop2/resources/spark-cleanup.cron
  24. 0
      sahara_plugin_vanilla/plugins/vanilla/hadoop2/resources/tmp-cleanup.sh.template
  25. 0
      sahara_plugin_vanilla/plugins/vanilla/hadoop2/resources/topology.sh
  26. 0
      sahara_plugin_vanilla/plugins/vanilla/hadoop2/resources/zoo_sample.cfg
  27. 84
      sahara_plugin_vanilla/plugins/vanilla/hadoop2/run_scripts.py
  28. 46
      sahara_plugin_vanilla/plugins/vanilla/hadoop2/scaling.py
  29. 11
      sahara_plugin_vanilla/plugins/vanilla/hadoop2/starting_scripts.py
  30. 9
      sahara_plugin_vanilla/plugins/vanilla/hadoop2/utils.py
  31. 11
      sahara_plugin_vanilla/plugins/vanilla/hadoop2/validation.py
  32. 4
      sahara_plugin_vanilla/plugins/vanilla/plugin.py
  33. 2
      sahara_plugin_vanilla/plugins/vanilla/utils.py
  34. 0
      sahara_plugin_vanilla/plugins/vanilla/v2_7_1/__init__.py
  35. 44
      sahara_plugin_vanilla/plugins/vanilla/v2_7_1/config_helper.py
  36. 25
      sahara_plugin_vanilla/plugins/vanilla/v2_7_1/edp_engine.py
  37. 0
      sahara_plugin_vanilla/plugins/vanilla/v2_7_1/resources/README.rst
  38. 0
      sahara_plugin_vanilla/plugins/vanilla/v2_7_1/resources/core-default.xml
  39. 0
      sahara_plugin_vanilla/plugins/vanilla/v2_7_1/resources/create_hive_db.sql
  40. 0
      sahara_plugin_vanilla/plugins/vanilla/v2_7_1/resources/hdfs-default.xml
  41. 0
      sahara_plugin_vanilla/plugins/vanilla/v2_7_1/resources/hive-default.xml
  42. 0
      sahara_plugin_vanilla/plugins/vanilla/v2_7_1/resources/mapred-default.xml
  43. 0
      sahara_plugin_vanilla/plugins/vanilla/v2_7_1/resources/oozie-default.xml
  44. 0
      sahara_plugin_vanilla/plugins/vanilla/v2_7_1/resources/yarn-default.xml
  45. 62
      sahara_plugin_vanilla/plugins/vanilla/v2_7_1/versionhandler.py
  46. 0
      sahara_plugin_vanilla/plugins/vanilla/v2_7_5/__init__.py
  47. 44
      sahara_plugin_vanilla/plugins/vanilla/v2_7_5/config_helper.py
  48. 27
      sahara_plugin_vanilla/plugins/vanilla/v2_7_5/edp_engine.py
  49. 0
      sahara_plugin_vanilla/plugins/vanilla/v2_7_5/resources/README.rst
  50. 0
      sahara_plugin_vanilla/plugins/vanilla/v2_7_5/resources/core-default.xml
  51. 0
      sahara_plugin_vanilla/plugins/vanilla/v2_7_5/resources/create_hive_db.sql
  52. 0
      sahara_plugin_vanilla/plugins/vanilla/v2_7_5/resources/hdfs-default.xml
  53. 0
      sahara_plugin_vanilla/plugins/vanilla/v2_7_5/resources/hive-default.xml
  54. 0
      sahara_plugin_vanilla/plugins/vanilla/v2_7_5/resources/mapred-default.xml
  55. 0
      sahara_plugin_vanilla/plugins/vanilla/v2_7_5/resources/oozie-default.xml
  56. 0
      sahara_plugin_vanilla/plugins/vanilla/v2_7_5/resources/yarn-default.xml
  57. 62
      sahara_plugin_vanilla/plugins/vanilla/v2_7_5/versionhandler.py
  58. 0
      sahara_plugin_vanilla/plugins/vanilla/v2_8_2/__init__.py
  59. 44
      sahara_plugin_vanilla/plugins/vanilla/v2_8_2/config_helper.py
  60. 29
      sahara_plugin_vanilla/plugins/vanilla/v2_8_2/edp_engine.py
  61. 0
      sahara_plugin_vanilla/plugins/vanilla/v2_8_2/resources/README.rst
  62. 0
      sahara_plugin_vanilla/plugins/vanilla/v2_8_2/resources/core-default.xml
  63. 0
      sahara_plugin_vanilla/plugins/vanilla/v2_8_2/resources/create_hive_db.sql
  64. 0
      sahara_plugin_vanilla/plugins/vanilla/v2_8_2/resources/hdfs-default.xml
  65. 0
      sahara_plugin_vanilla/plugins/vanilla/v2_8_2/resources/hive-default.xml
  66. 0
      sahara_plugin_vanilla/plugins/vanilla/v2_8_2/resources/mapred-default.xml
  67. 0
      sahara_plugin_vanilla/plugins/vanilla/v2_8_2/resources/oozie-default.xml
  68. 0
      sahara_plugin_vanilla/plugins/vanilla/v2_8_2/resources/yarn-default.xml
  69. 62
      sahara_plugin_vanilla/plugins/vanilla/v2_8_2/versionhandler.py
  70. 8
      sahara_plugin_vanilla/plugins/vanilla/versionfactory.py
  71. 2
      sahara_plugin_vanilla/tests/__init__.py
  72. 0
      sahara_plugin_vanilla/tests/unit/__init__.py
  73. 36
      sahara_plugin_vanilla/tests/unit/base.py
  74. 0
      sahara_plugin_vanilla/tests/unit/plugins/__init__.py
  75. 0
      sahara_plugin_vanilla/tests/unit/plugins/vanilla/__init__.py
  76. 0
      sahara_plugin_vanilla/tests/unit/plugins/vanilla/hadoop2/__init__.py
  77. 0
      sahara_plugin_vanilla/tests/unit/plugins/vanilla/hadoop2/resources/dfs-report.txt
  78. 0
      sahara_plugin_vanilla/tests/unit/plugins/vanilla/hadoop2/resources/yarn-report.txt
  79. 75
      sahara_plugin_vanilla/tests/unit/plugins/vanilla/hadoop2/test_config_helper.py
  80. 4
      sahara_plugin_vanilla/tests/unit/plugins/vanilla/hadoop2/test_configs.py
  81. 17
      sahara_plugin_vanilla/tests/unit/plugins/vanilla/hadoop2/test_edp_engine.py
  82. 11
      sahara_plugin_vanilla/tests/unit/plugins/vanilla/hadoop2/test_oozie_helper.py
  83. 14
      sahara_plugin_vanilla/tests/unit/plugins/vanilla/hadoop2/test_plugin.py
  84. 4
      sahara_plugin_vanilla/tests/unit/plugins/vanilla/hadoop2/test_recommendation_utils.py
  85. 94
      sahara_plugin_vanilla/tests/unit/plugins/vanilla/hadoop2/test_run_scripts.py
  86. 42
      sahara_plugin_vanilla/tests/unit/plugins/vanilla/hadoop2/test_scaling.py
  87. 44
      sahara_plugin_vanilla/tests/unit/plugins/vanilla/hadoop2/test_starting_scripts.py
  88. 43
      sahara_plugin_vanilla/tests/unit/plugins/vanilla/hadoop2/test_utils.py
  89. 6
      sahara_plugin_vanilla/tests/unit/plugins/vanilla/hadoop2/test_validation.py
  90. 29
      sahara_plugin_vanilla/tests/unit/plugins/vanilla/test_confighints_helper.py
  91. 8
      sahara_plugin_vanilla/tests/unit/plugins/vanilla/test_utils.py
  92. 0
      sahara_plugin_vanilla/tests/unit/plugins/vanilla/v2_7_1/__init__.py
  93. 26
      sahara_plugin_vanilla/tests/unit/plugins/vanilla/v2_7_1/test_config_helper.py
  94. 20
      sahara_plugin_vanilla/tests/unit/plugins/vanilla/v2_7_1/test_edp_engine.py
  95. 37
      sahara_plugin_vanilla/tests/unit/plugins/vanilla/v2_7_1/test_versionhandler.py
  96. 0
      sahara_plugin_vanilla/tests/unit/plugins/vanilla/v2_7_5/__init__.py
  97. 26
      sahara_plugin_vanilla/tests/unit/plugins/vanilla/v2_7_5/test_config_helper.py
  98. 20
      sahara_plugin_vanilla/tests/unit/plugins/vanilla/v2_7_5/test_edp_engine.py
  99. 37
      sahara_plugin_vanilla/tests/unit/plugins/vanilla/v2_7_5/test_versionhandler.py
  100. 0
      sahara_plugin_vanilla/tests/unit/plugins/vanilla/v2_8_2/__init__.py

2
.stestr.conf

@ -1,3 +1,3 @@
[DEFAULT]
test_path=./sahara/tests/unit
test_path=./sahara_plugin_vanilla/tests/unit
top_dir=./

35
README.rst

@ -0,0 +1,35 @@
========================
Team and repository tags
========================
.. image:: https://governance.openstack.org/tc/badges/sahara.svg
:target: https://governance.openstack.org/tc/reference/tags/index.html
.. Change things from this point on
OpenStack Data Processing ("Sahara") project
============================================
Sahara at wiki.openstack.org: https://wiki.openstack.org/wiki/Sahara
Storyboard project: https://storyboard.openstack.org/#!/project/935
Sahara docs site: https://docs.openstack.org/sahara/latest/
Roadmap: https://wiki.openstack.org/wiki/Sahara/Roadmap
Quickstart guide: https://docs.openstack.org/sahara/latest/user/quickstart.html
How to participate: https://docs.openstack.org/sahara/latest/contributor/how-to-participate.html
Source: https://git.openstack.org/cgit/openstack/sahara
Bugs and feature requests: https://storyboard.openstack.org/#!/project/935
Release notes: https://docs.openstack.org/releasenotes/sahara/
License
-------
Apache License Version 2.0 http://www.apache.org/licenses/LICENSE-2.0

9
doc/requirements.txt

@ -0,0 +1,9 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
openstackdocstheme>=1.18.1 # Apache-2.0
os-api-ref>=1.4.0 # Apache-2.0
reno>=2.5.0 # Apache-2.0
sphinx!=1.6.6,!=1.6.7,>=1.6.2 # BSD
sphinxcontrib-httpdomain>=1.3.0 # BSD
whereto>=0.3.0 # Apache-2.0

34
errors.txt

@ -0,0 +1,34 @@
py27 develop-inst-nodeps: /home/tenobreg/coding/upstream/sahara/sahara
py27 installed: alabaster==0.7.11,alembic==1.0.0,amqp==2.3.2,appdirs==1.4.3,asn1crypto==0.24.0,astroid==1.3.8,Babel==2.6.0,bandit==1.5.0,bashate==0.6.0,bcrypt==3.1.4,botocore==1.10.62,cachetools==2.1.0,castellan==0.18.0,certifi==2018.4.16,cffi==1.11.5,chardet==3.0.4,click==6.7,cliff==2.13.0,cmd2==0.8.8,contextlib2==0.5.5,coverage==4.5.1,cryptography==2.3,debtcollector==1.20.0,decorator==4.3.0,deprecation==2.0.5,doc8==0.8.0,docutils==0.14,dogpile.cache==0.6.6,dulwich==0.19.5,enum-compat==0.0.2,enum34==1.1.6,eventlet==0.20.0,extras==1.0.0,fasteners==0.14.1,fixtures==3.0.0,flake8==2.5.5,Flask==1.0.2,funcsigs==1.0.2,functools32==3.2.3.post2,future==0.16.0,futures==3.2.0,futurist==1.7.0,gitdb2==2.0.4,GitPython==2.1.11,greenlet==0.4.13,hacking==0.12.0,idna==2.7,imagesize==1.0.0,ipaddress==1.0.22,iso8601==0.1.12,itsdangerous==0.24,Jinja2==2.10,jmespath==0.9.3,jsonpatch==1.23,jsonpointer==2.0,jsonschema==2.6.0,keystoneauth1==3.10.0,keystonemiddleware==5.2.0,kombu==4.2.1,linecache2==1.0.0,logilab-common==1.4.2,Mako==1.0.7,MarkupSafe==1.0,mccabe==0.2.1,mock==2.0.0,monotonic==1.5,mox3==0.26.0,msgpack==0.5.6,munch==2.3.2,netaddr==0.7.19,netifaces==0.10.7,openstackdocstheme==1.22.0,openstacksdk==0.17.2,os-api-ref==1.5.0,os-client-config==1.31.2,os-service-types==1.3.0,os-testr==1.0.0,osc-lib==1.11.1,oslo.cache==1.30.1,oslo.concurrency==3.27.0,oslo.config==6.4.0,oslo.context==2.21.0,oslo.db==4.40.0,oslo.i18n==3.21.0,oslo.log==3.39.0,oslo.messaging==8.1.0,oslo.middleware==3.36.0,oslo.policy==1.38.1,oslo.rootwrap==5.14.1,oslo.serialization==2.27.0,oslo.service==1.31.3,oslo.utils==3.36.4,oslotest==3.6.0,packaging==17.1,paramiko==2.4.1,Paste==2.0.3,PasteDeploy==1.5.2,pbr==4.2.0,pep8==1.5.7,prettytable==0.7.2,psycopg2==2.7.5,pyasn1==0.4.3,pycadf==2.8.0,pycparser==2.18,pyflakes==0.8.1,Pygments==2.2.0,pyinotify==0.9.6,pylint==1.4.5,PyMySQL==0.9.2,PyNaCl==1.2.1,pyOpenSSL==18.0.0,pyparsing==2.2.0,pyperclip==1.6.4,python-barbicanclient==4.7.0,python-cinderclient==4.0.1,python-dateutil==2.7
.3,python-editor==1.0.3,python-glanceclient==2.12.1,python-heatclient==1.16.1,python-keystoneclient==3.17.0,python-manilaclient==1.24.1,python-mimeparse==1.6.0,python-neutronclient==6.9.0,python-novaclient==11.0.0,python-openstackclient==3.16.0,python-saharaclient==2.0.0,python-subunit==1.3.0,python-swiftclient==3.6.0,pytz==2018.5,PyYAML==3.13,reno==2.9.2,repoze.lru==0.7,requests==2.19.1,requestsexceptions==1.4.0,restructuredtext-lint==1.1.3,rfc3986==1.1.0,Routes==2.4.1,-e git+https://github.com/openstack/sahara.git@efb05b3624044f307168d0b5da888132f51aebb7#egg=sahara,simplejson==3.16.0,six==1.11.0,smmap2==2.0.4,snowballstemmer==1.2.1,Sphinx==1.7.6,sphinxcontrib-httpdomain==1.7.0,sphinxcontrib-websupport==1.1.0,SQLAlchemy==1.2.10,sqlalchemy-migrate==0.11.0,sqlparse==0.2.4,statsd==3.2.2,stestr==2.1.0,stevedore==1.29.0,subprocess32==3.5.2,Tempita==0.5.2,tenacity==4.12.0,testresources==2.0.1,testscenarios==0.5.0,testtools==2.3.0,tooz==1.62.0,traceback2==1.4.0,typing==3.6.4,unicodecsv==0.14.1,unittest2==1.1.0,urllib3==1.23,vine==1.1.4,voluptuous==0.11.1,warlock==1.3.0,wcwidth==0.1.7,WebOb==1.8.2,Werkzeug==0.14.1,wrapt==1.10.11
py27 runtests: PYTHONHASHSEED='839100177'
py27 runtests: commands[0] | ostestr
=========================
Failures during discovery
=========================
--- import errors ---
Failed to import test module: sahara.tests.unit.service.edp.spark.test_shell
Traceback (most recent call last):
File "/home/tenobreg/coding/upstream/sahara/sahara/.tox/py27/lib/python2.7/site-packages/unittest2/loader.py", line 456, in _find_test_path
module = self._get_module_from_name(name)
File "/home/tenobreg/coding/upstream/sahara/sahara/.tox/py27/lib/python2.7/site-packages/unittest2/loader.py", line 395, in _get_module_from_name
__import__(name)
File "sahara/tests/unit/service/edp/spark/test_shell.py", line 18, in <module>
from sahara.plugins.spark import shell_engine
ImportError: No module named spark
Failed to import test module: sahara.tests.unit.service.edp.spark.test_spark
Traceback (most recent call last):
File "/home/tenobreg/coding/upstream/sahara/sahara/.tox/py27/lib/python2.7/site-packages/unittest2/loader.py", line 456, in _find_test_path
module = self._get_module_from_name(name)
File "/home/tenobreg/coding/upstream/sahara/sahara/.tox/py27/lib/python2.7/site-packages/unittest2/loader.py", line 395, in _get_module_from_name
__import__(name)
File "sahara/tests/unit/service/edp/spark/test_spark.py", line 17, in <module>
from sahara.plugins.spark import edp_engine as spark_edp
ImportError: No module named spark
================================================================================
The above traceback was encountered during test discovery which imports all the found test modules in the specified test_path.
ERROR: InvocationError: '/home/tenobreg/coding/upstream/sahara/sahara/.tox/py27/bin/ostestr'
___________________________________ summary ____________________________________
ERROR: py27: commands failed

162
lower-constraints.txt

@ -0,0 +1,162 @@
alabaster==0.7.10
alembic==0.8.10
amqp==2.2.2
appdirs==1.4.3
asn1crypto==0.24.0
astroid==1.3.8
Babel==2.3.4
bandit==1.1.0
bashate==0.5.1
bcrypt==3.1.4
botocore==1.5.1
cachetools==2.0.1
castellan==0.16.0
certifi==2018.1.18
cffi==1.11.5
chardet==3.0.4
click==6.7
cliff==2.11.0
cmd2==0.8.1
contextlib2==0.5.5
coverage==4.0
cryptography==2.1.4
debtcollector==1.19.0
decorator==4.2.1
deprecation==2.0
doc8==0.6.0
docutils==0.14
dogpile.cache==0.6.5
dulwich==0.19.0
enum-compat==0.0.2
eventlet==0.18.2
extras==1.0.0
fasteners==0.14.1
fixtures==3.0.0
flake8==2.6.2
Flask==1.0.2
future==0.16.0
futurist==1.6.0
gitdb2==2.0.3
GitPython==2.1.8
greenlet==0.4.13
hacking==1.1.0
idna==2.6
imagesize==1.0.0
iso8601==0.1.11
itsdangerous==0.24
Jinja2==2.10
jmespath==0.9.3
jsonpatch==1.21
jsonpointer==2.0
jsonschema==2.6.0
keystoneauth1==3.4.0
keystonemiddleware==4.17.0
kombu==4.1.0
linecache2==1.0.0
logilab-common==1.4.1
Mako==1.0.7
MarkupSafe==1.0
mccabe==0.2.1
mock==2.0.0
monotonic==1.4
mox3==0.25.0
msgpack==0.5.6
munch==2.2.0
netaddr==0.7.19
netifaces==0.10.6
openstackdocstheme==1.18.1
openstacksdk==0.12.0
os-api-ref==1.4.0
os-client-config==1.29.0
os-service-types==1.2.0
osc-lib==1.10.0
oslo.cache==1.29.0
oslo.concurrency==3.26.0
oslo.config==5.2.0
oslo.context==2.19.2
oslo.db==4.27.0
oslo.i18n==3.15.3
oslo.log==3.36.0
oslo.messaging==5.29.0
oslo.middleware==3.31.0
oslo.policy==1.30.0
oslo.rootwrap==5.8.0
oslo.serialization==2.18.0
oslo.service==1.24.0
oslo.upgradecheck==0.1.0
oslo.utils==3.33.0
oslotest==3.2.0
packaging==17.1
paramiko==2.0.0
Paste==2.0.3
PasteDeploy==1.5.2
pbr==2.0.0
pika-pool==0.1.3
pika==0.10.0
prettytable==0.7.2
psycopg2==2.6.2
pyasn1==0.4.2
pycadf==2.7.0
pycparser==2.18
pycodestyle==2.4.0
pyflakes==0.8.1
Pygments==2.2.0
pyinotify==0.9.6
pylint==1.4.5
PyMySQL==0.7.6
PyNaCl==1.2.1
pyOpenSSL==17.5.0
pyparsing==2.2.0
pyperclip==1.6.0
python-barbicanclient==4.6.0
python-cinderclient==3.3.0
python-dateutil==2.7.0
python-editor==1.0.3
python-glanceclient==2.8.0
python-heatclient==1.10.0
python-keystoneclient==3.8.0
python-manilaclient==1.16.0
python-mimeparse==1.6.0
python-neutronclient==6.7.0
python-novaclient==9.1.0
python-openstackclient==3.14.0
python-saharaclient==1.4.0
python-subunit==1.2.0
python-swiftclient==3.2.0
pytz==2018.3
PyYAML==3.12
reno==2.5.0
repoze.lru==0.7
requests==2.14.2
requestsexceptions==1.4.0
restructuredtext-lint==1.1.3
rfc3986==1.1.0
Routes==2.4.1
simplejson==3.13.2
six==1.10.0
smmap2==2.0.3
snowballstemmer==1.2.1
Sphinx==1.6.2
sphinxcontrib-httpdomain==1.3.0
sphinxcontrib-websupport==1.0.1
sqlalchemy-migrate==0.11.0
SQLAlchemy==1.0.10
sqlparse==0.2.4
statsd==3.2.2
stestr==1.0.0
stevedore==1.20.0
Tempita==0.5.2
tenacity==4.9.0
testresources==2.0.0
testscenarios==0.4
testtools==2.2.0
tooz==1.58.0
traceback2==1.4.0
unittest2==1.1.0
urllib3==1.22
vine==1.1.4
voluptuous==0.11.1
warlock==1.3.0
WebOb==1.7.1
Werkzeug==0.14.1
wrapt==1.10.11

1
requirements.txt

@ -39,6 +39,7 @@ python-swiftclient>=3.2.0 # Apache-2.0
python-neutronclient>=6.7.0 # Apache-2.0
python-heatclient>=1.10.0 # Apache-2.0
python-glanceclient>=2.8.0 # Apache-2.0
sahara
six>=1.10.0 # MIT
stevedore>=1.20.0 # Apache-2.0
SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 # MIT

0
sahara/__init__.py → sahara_plugin_vanilla/__init__.py

0
sahara/i18n.py → sahara_plugin_vanilla/i18n.py

0
sahara/plugins/__init__.py → sahara_plugin_vanilla/plugins/__init__.py

0
sahara/plugins/vanilla/__init__.py → sahara_plugin_vanilla/plugins/vanilla/__init__.py

0
sahara/plugins/vanilla/abstractversionhandler.py → sahara_plugin_vanilla/plugins/vanilla/abstractversionhandler.py

12
sahara/plugins/vanilla/confighints_helper.py → sahara_plugin_vanilla/plugins/vanilla/confighints_helper.py

@ -13,14 +13,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.service.edp.oozie.workflow_creator import workflow_factory
from sahara.utils import xmlutils
from sahara.plugins import edp
from sahara.plugins import utils
def get_possible_hive_config_from(file_name):
'''Return the possible configs, args, params for a Hive job.'''
config = {
'configs': xmlutils.load_hadoop_xml_defaults(file_name),
'configs': utils.load_hadoop_xml_defaults(file_name,
'sahara_plugin_vanilla'),
'params': {}
}
return config
@ -31,14 +32,15 @@ def get_possible_mapreduce_config_from(file_name):
config = {
'configs': get_possible_pig_config_from(file_name).get('configs')
}
config['configs'] += workflow_factory.get_possible_mapreduce_configs()
config['configs'] += edp.get_possible_mapreduce_configs()
return config
def get_possible_pig_config_from(file_name):
'''Return the possible configs, args, params for a Pig job.'''
config = {
'configs': xmlutils.load_hadoop_xml_defaults(file_name),
'configs': utils.load_hadoop_xml_defaults(file_name,
'sahara_plugin_vanilla'),
'args': [],
'params': {}
}

6
sahara/plugins/vanilla/edp_engine.py → sahara_plugin_vanilla/plugins/vanilla/edp_engine.py

@ -13,13 +13,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins import edp
from sahara.plugins import exceptions as ex
from sahara.plugins import utils as u
from sahara.plugins.vanilla import utils as vu
from sahara.service.edp.oozie import engine as edp_engine
from sahara_plugin_vanilla.plugins.vanilla import utils as vu
class EdpOozieEngine(edp_engine.OozieJobEngine):
class EdpOozieEngine(edp.PluginsOozieJobEngine):
def get_hdfs_user(self):
return 'hadoop'

0
sahara/plugins/vanilla/hadoop2/__init__.py → sahara_plugin_vanilla/plugins/vanilla/hadoop2/__init__.py

84
sahara/plugins/vanilla/hadoop2/config.py → sahara_plugin_vanilla/plugins/vanilla/hadoop2/config.py

@ -18,21 +18,16 @@ from oslo_config import cfg
from oslo_log import log as logging
import six
from sahara import context
from sahara.i18n import _
from sahara.plugins import castellan_utils as key_manager
from sahara.plugins import context
from sahara.plugins import swift_helper as swift
from sahara.plugins import topology_helper as th
from sahara.plugins import utils
from sahara.plugins.vanilla.hadoop2 import config_helper as c_helper
from sahara.plugins.vanilla.hadoop2 import oozie_helper as o_helper
from sahara.plugins.vanilla.hadoop2 import utils as u
from sahara.plugins.vanilla import utils as vu
from sahara.service.castellan import utils as key_manager
from sahara.swift import swift_helper as swift
from sahara.topology import topology_helper as th
from sahara.utils import cluster_progress_ops as cpo
from sahara.utils import configs as s_cfg
from sahara.utils import files as f
from sahara.utils import proxy
from sahara.utils import xmlutils as x
from sahara_plugin_vanilla.i18n import _
from sahara_plugin_vanilla.plugins.vanilla.hadoop2 import config_helper
from sahara_plugin_vanilla.plugins.vanilla.hadoop2 import oozie_helper
from sahara_plugin_vanilla.plugins.vanilla.hadoop2 import utils as u
from sahara_plugin_vanilla.plugins.vanilla import utils as vu
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@ -61,8 +56,8 @@ def configure_cluster(pctx, cluster):
LOG.debug("Configuring cluster")
if (CONF.use_identity_api_v3 and CONF.use_domain_for_proxy_users and
vu.get_hiveserver(cluster) and
c_helper.is_swift_enabled(pctx, cluster)):
cluster = proxy.create_proxy_user_for_cluster(cluster)
config_helper.is_swift_enabled(pctx, cluster)):
cluster = utils.create_proxy_user_for_cluster(cluster)
instances = utils.get_instances(cluster)
configure_instances(pctx, instances)
@ -74,7 +69,7 @@ def configure_cluster(pctx, cluster):
def configure_zookeeper(cluster, instances=None):
zk_servers = vu.get_zk_servers(cluster)
if zk_servers:
zk_conf = c_helper.generate_zk_basic_config(cluster)
zk_conf = config_helper.generate_zk_basic_config(cluster)
zk_conf += _form_zk_servers_to_quorum(cluster, instances)
_push_zk_configs_to_nodes(cluster, zk_conf, instances)
@ -126,7 +121,7 @@ def _push_spark_configs_to_node(cluster, extra):
def _push_spark_configs_to_existing_node(spark_master, cluster, extra):
sp_home = c_helper.get_spark_home(cluster)
sp_home = config_helper.get_spark_home(cluster)
files = {
os.path.join(sp_home,
'conf/spark-env.sh'): extra['sp_master'],
@ -160,13 +155,13 @@ def _extract_spark_configs_to_extra(cluster):
config_master = ''
if sp_master is not None:
config_master = c_helper.generate_spark_env_configs(cluster)
config_master = config_helper.generate_spark_env_configs(cluster)
# Any node that might be used to run spark-submit will need
# these libs for swift integration
config_defaults = c_helper.generate_spark_executor_classpath(cluster)
config_defaults = config_helper.generate_spark_executor_classpath(cluster)
extra['job_cleanup'] = c_helper.generate_job_cleanup_config(cluster)
extra['job_cleanup'] = config_helper.generate_job_cleanup_config(cluster)
extra['sp_master'] = config_master
extra['sp_defaults'] = config_defaults
@ -177,7 +172,7 @@ def configure_instances(pctx, instances):
if len(instances) == 0:
return
cpo.add_provisioning_step(
utils.add_provisioning_step(
instances[0].cluster_id, _("Configure instances"), len(instances))
for instance in instances:
@ -185,7 +180,7 @@ def configure_instances(pctx, instances):
_configure_instance(pctx, instance)
@cpo.event_wrapper(True)
@utils.event_wrapper(True)
def _configure_instance(pctx, instance):
_provisioning_configs(pctx, instance)
_post_configuration(pctx, instance)
@ -201,8 +196,8 @@ def _generate_configs(pctx, instance):
hadoop_xml_confs = _get_hadoop_configs(pctx, instance)
user_xml_confs, user_env_confs = _get_user_configs(
pctx, instance.node_group)
xml_confs = s_cfg.merge_configs(user_xml_confs, hadoop_xml_confs)
env_confs = s_cfg.merge_configs(pctx['env_confs'], user_env_confs)
xml_confs = utils.merge_configs(user_xml_confs, hadoop_xml_confs)
env_confs = utils.merge_configs(pctx['env_confs'], user_env_confs)
return xml_confs, env_confs
@ -249,20 +244,21 @@ def _get_hadoop_configs(pctx, instance):
}
confs['Hadoop'].update(hadoop_cfg)
oozie_cfg = o_helper.get_oozie_required_xml_configs(HADOOP_CONF_DIR)
if c_helper.is_mysql_enabled(pctx, cluster):
oozie_cfg.update(o_helper.get_oozie_mysql_configs(cluster))
oozie_cfg = oozie_helper.get_oozie_required_xml_configs(
HADOOP_CONF_DIR)
if config_helper.is_mysql_enabled(pctx, cluster):
oozie_cfg.update(oozie_helper.get_oozie_mysql_configs(cluster))
confs['JobFlow'] = oozie_cfg
if c_helper.is_swift_enabled(pctx, cluster):
if config_helper.is_swift_enabled(pctx, cluster):
swift_configs = {}
for config in swift.get_swift_configs():
swift_configs[config['name']] = config['value']
confs['Hadoop'].update(swift_configs)
if c_helper.is_data_locality_enabled(pctx, cluster):
if config_helper.is_data_locality_enabled(pctx, cluster):
confs['Hadoop'].update(th.TOPOLOGY_CONFIG)
confs['Hadoop'].update({"topology.script.file.name":
HADOOP_CONF_DIR + "/topology.sh"})
@ -277,7 +273,7 @@ def _get_hadoop_configs(pctx, instance):
'jdbc:derby:;databaseName=/opt/hive/metastore_db;create=true'
}
if c_helper.is_mysql_enabled(pctx, cluster):
if config_helper.is_mysql_enabled(pctx, cluster):
hive_cfg.update({
'javax.jdo.option.ConnectionURL':
'jdbc:mysql://%s/metastore' % hive_hostname,
@ -291,7 +287,7 @@ def _get_hadoop_configs(pctx, instance):
})
proxy_configs = cluster.cluster_configs.get('proxy_configs')
if proxy_configs and c_helper.is_swift_enabled(pctx, cluster):
if proxy_configs and config_helper.is_swift_enabled(pctx, cluster):
hive_cfg.update({
swift.HADOOP_SWIFT_USERNAME: proxy_configs['proxy_username'],
swift.HADOOP_SWIFT_PASSWORD: key_manager.get_secret(
@ -311,8 +307,8 @@ def _get_user_configs(pctx, node_group):
cl_xml_confs, cl_env_confs = _separate_configs(
node_group.cluster.cluster_configs, pctx['env_confs'])
xml_confs = s_cfg.merge_configs(cl_xml_confs, ng_xml_confs)
env_confs = s_cfg.merge_configs(cl_env_confs, ng_env_confs)
xml_confs = utils.merge_configs(cl_xml_confs, ng_xml_confs)
env_confs = utils.merge_configs(cl_env_confs, ng_env_confs)
return xml_confs, env_confs
@ -336,7 +332,7 @@ def _separate_configs(configs, all_env_configs):
def _generate_xml(configs):
xml_confs = {}
for service, confs in six.iteritems(configs):
xml_confs[service] = x.create_hadoop_xml(confs)
xml_confs[service] = utils.create_hadoop_xml(confs)
return xml_confs
@ -417,8 +413,9 @@ def _post_configuration(pctx, instance):
'hadoop_secure_dn_log_dir': dirs['hadoop_secure_dn_log_dir'],
'yarn_log_dir': dirs['yarn_log_dir']
}
post_conf_script = f.get_file_text(
'plugins/vanilla/hadoop2/resources/post_conf.template')
post_conf_script = utils.get_file_text(
'plugins/vanilla/hadoop2/resources/post_conf.template',
'sahara_plugin_vanilla')
post_conf_script = post_conf_script.format(**args)
with instance.remote() as r:
@ -426,12 +423,11 @@ def _post_configuration(pctx, instance):
r.execute_command('chmod +x /tmp/post_conf.sh')
r.execute_command('sudo /tmp/post_conf.sh')
if c_helper.is_data_locality_enabled(pctx,
instance.cluster):
if config_helper.is_data_locality_enabled(pctx, instance.cluster):
t_script = HADOOP_CONF_DIR + '/topology.sh'
r.write_file_to(t_script, f.get_file_text(
'plugins/vanilla/hadoop2/resources/topology.sh'),
run_as_root=True)
r.write_file_to(t_script, utils.get_file_text(
'plugins/vanilla/hadoop2/resources/topology.sh',
'sahara_plugin_vanilla'), run_as_root=True)
r.execute_command('chmod +x ' + t_script, run_as_root=True)
@ -456,10 +452,10 @@ def _make_hadoop_paths(paths, hadoop_dir):
return [path + hadoop_dir for path in paths]
@cpo.event_wrapper(
@utils.event_wrapper(
True, step=_("Configure topology data"), param=('cluster', 1))
def configure_topology_data(pctx, cluster):
if c_helper.is_data_locality_enabled(pctx, cluster):
if config_helper.is_data_locality_enabled(pctx, cluster):
LOG.warning("Node group awareness is not implemented in YARN yet "
"so enable_hypervisor_awareness set to False explicitly")
tpl_map = th.generate_topology_map(cluster, is_node_awareness=False)

27
sahara/plugins/vanilla/hadoop2/config_helper.py → sahara_plugin_vanilla/plugins/vanilla/hadoop2/config_helper.py

@ -1,4 +1,4 @@
# Copyright (c) 2014 Mirantis Inc.
# Copyright (c) 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -16,12 +16,10 @@
from oslo_config import cfg
import six
from sahara import exceptions as ex
from sahara.i18n import _
from sahara.plugins import exceptions as ex
from sahara.plugins import provisioning as p
from sahara.plugins import utils
from sahara.utils import files as f
from sahara.utils import types
from sahara_plugin_vanilla.i18n import _
CONF = cfg.CONF
CONF.import_opt("enable_data_locality", "sahara.topology.topology_helper")
@ -165,7 +163,7 @@ def init_xml_configs(xml_confs):
if cfg.default_value in ["true", "false"]:
cfg.config_type = "bool"
cfg.default_value = (cfg.default_value == 'true')
elif types.is_int(cfg.default_value):
elif utils.is_int(cfg.default_value):
cfg.config_type = "int"
cfg.default_value = int(cfg.default_value)
if config['name'] in CLUSTER_WIDE_CONFS:
@ -245,7 +243,7 @@ def get_config_value(pctx, service, name, cluster=None):
if c.applicable_target == service and c.name == name:
return c.default_value
raise ex.NotFoundException(
raise ex.PluginNotFoundException(
{"name": name, "service": service},
_("Unable to get parameter '%(name)s' from service %(service)s"))
@ -303,10 +301,12 @@ def generate_job_cleanup_config(cluster):
(args['minimum_cleanup_megabytes'] > 0
and args['minimum_cleanup_seconds'] > 0))}
if job_conf['valid']:
job_conf['cron'] = f.get_file_text(
'plugins/vanilla/hadoop2/resources/spark-cleanup.cron'),
job_cleanup_script = f.get_file_text(
'plugins/vanilla/hadoop2/resources/tmp-cleanup.sh.template')
job_conf['cron'] = utils.get_file_text(
'plugins/vanilla/hadoop2/resources/spark-cleanup.cron',
'sahara_plugin_vanilla'),
job_cleanup_script = utils.get_file_text(
'plugins/vanilla/hadoop2/resources/tmp-cleanup.sh.template',
'sahara_plugin_vanilla')
job_conf['script'] = job_cleanup_script.format(**args)
return job_conf
@ -324,6 +324,7 @@ def generate_zk_basic_config(cluster):
'synclimit': utils.get_config_value_or_default(
"ZooKeeper", "syncLimit", cluster)
}
zoo_cfg = f.get_file_text(
'plugins/vanilla/hadoop2/resources/zoo_sample.cfg')
zoo_cfg = utils.get_file_text(
'plugins/vanilla/hadoop2/resources/zoo_sample.cfg',
'sahara_plugin_vanilla')
return zoo_cfg.format(**args)

6
sahara/plugins/vanilla/hadoop2/edp_engine.py → sahara_plugin_vanilla/plugins/vanilla/hadoop2/edp_engine.py

@ -13,14 +13,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins.vanilla import edp_engine
from sahara.service.edp import hdfs_helper
from sahara.plugins import edp
from sahara_plugin_vanilla.plugins.vanilla import edp_engine
class EdpOozieEngine(edp_engine.EdpOozieEngine):
def create_hdfs_dir(self, remote, dir_name):
hdfs_helper.create_dir_hadoop2(remote, dir_name, self.get_hdfs_user())
edp.create_dir_hadoop2(remote, dir_name, self.get_hdfs_user())
def get_resource_manager_uri(self, cluster):
return cluster['info']['YARN']['ResourceManager']

14
sahara/plugins/vanilla/hadoop2/keypairs.py → sahara_plugin_vanilla/plugins/vanilla/hadoop2/keypairs.py

@ -18,12 +18,10 @@ from castellan import key_manager
from oslo_log import log as logging
from sahara import conductor
from sahara import context
from sahara.utils import cluster as utils
from sahara.utils import crypto
from sahara.plugins import conductor
from sahara.plugins import context
from sahara.plugins import utils
cond = conductor.API
LOG = logging.getLogger(__name__)
@@ -72,15 +70,15 @@ def provision_keypairs(cluster, instances=None):
# cluster created before mitaka, skipping provisioning
return
if not keypair:
private, public = crypto.generate_key_pair()
private, public = utils.generate_key_pair()
keypair = {'public': public, 'private': private}
extra['vanilla_keypair'] = keypair
extra['vanilla_keypair']['private'] = _store_secret(
keypair['private'])
cond.cluster_update(context.ctx(), cluster, {'extra': extra})
conductor.cluster_update(context.ctx(), cluster, {'extra': extra})
else:
keypair['private'] = _get_secret(keypair['private'])
with context.ThreadGroup() as tg:
with context.PluginsThreadGroup() as tg:
for instance in instances:
tg.spawn(
'provision-key-%s' % instance.instance_name,

2
sahara/plugins/vanilla/hadoop2/oozie_helper.py → sahara_plugin_vanilla/plugins/vanilla/hadoop2/oozie_helper.py

@@ -12,7 +12,7 @@
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins.vanilla.hadoop2 import utils as u
from sahara_plugin_vanilla.plugins.vanilla.hadoop2 import utils as u
def get_oozie_required_xml_configs(hadoop_conf_dir):

0
sahara/plugins/vanilla/hadoop2/recommendations_utils.py → sahara_plugin_vanilla/plugins/vanilla/hadoop2/recommendations_utils.py

0
sahara/plugins/vanilla/hadoop2/resources/create_oozie_db.sql → sahara_plugin_vanilla/plugins/vanilla/hadoop2/resources/create_oozie_db.sql

0
sahara/plugins/vanilla/hadoop2/resources/post_conf.template → sahara_plugin_vanilla/plugins/vanilla/hadoop2/resources/post_conf.template

0
sahara/plugins/vanilla/hadoop2/resources/spark-cleanup.cron → sahara_plugin_vanilla/plugins/vanilla/hadoop2/resources/spark-cleanup.cron

0
sahara/plugins/vanilla/hadoop2/resources/tmp-cleanup.sh.template → sahara_plugin_vanilla/plugins/vanilla/hadoop2/resources/tmp-cleanup.sh.template

0
sahara/plugins/vanilla/hadoop2/resources/topology.sh → sahara_plugin_vanilla/plugins/vanilla/hadoop2/resources/topology.sh

0
sahara/plugins/vanilla/hadoop2/resources/zoo_sample.cfg → sahara_plugin_vanilla/plugins/vanilla/hadoop2/resources/zoo_sample.cfg

84
sahara/plugins/vanilla/hadoop2/run_scripts.py → sahara_plugin_vanilla/plugins/vanilla/hadoop2/run_scripts.py

@@ -17,34 +17,31 @@ import os
from oslo_log import log as logging
from sahara import context
from sahara.i18n import _
from sahara.plugins import utils as pu
from sahara.plugins.vanilla.hadoop2 import config_helper as c_helper
from sahara.plugins.vanilla.hadoop2 import oozie_helper
from sahara.plugins.vanilla.hadoop2 import utils as u
from sahara.plugins.vanilla import utils as vu
from sahara.utils import cluster_progress_ops as cpo
from sahara.utils import edp
from sahara.utils import files
from sahara.utils import poll_utils
from sahara.plugins import context
from sahara.plugins import edp
from sahara.plugins import utils
from sahara_plugin_vanilla.i18n import _
from sahara_plugin_vanilla.plugins.vanilla.hadoop2 import config_helper
from sahara_plugin_vanilla.plugins.vanilla.hadoop2 import oozie_helper
from sahara_plugin_vanilla.plugins.vanilla.hadoop2 import utils as u
from sahara_plugin_vanilla.plugins.vanilla import utils as vu
LOG = logging.getLogger(__name__)
def start_dn_nm_processes(instances):
filternames = ['datanode', 'nodemanager']
instances = pu.instances_with_services(instances, filternames)
instances = utils.instances_with_services(instances, filternames)
if len(instances) == 0:
return
cpo.add_provisioning_step(
utils.add_provisioning_step(
instances[0].cluster_id,
pu.start_process_event_message("DataNodes, NodeManagers"),
utils.start_process_event_message("DataNodes, NodeManagers"),
len(instances))
with context.ThreadGroup() as tg:
with context.PluginsThreadGroup() as tg:
for instance in instances:
with context.set_current_instance_id(instance.instance_id):
processes = set(instance.node_group.node_processes)
@@ -53,7 +50,7 @@ def start_dn_nm_processes(instances):
_start_processes, instance, list(processes))
@cpo.event_wrapper(True)
@utils.event_wrapper(True)
def _start_processes(instance, processes):
with instance.remote() as r:
if 'datanode' in processes:
@@ -74,21 +71,23 @@ def start_yarn_process(instance, process):
'sudo su - -c "yarn-daemon.sh start %s" hadoop' % process)
@cpo.event_wrapper(True, step=pu.start_process_event_message("HistoryServer"))
@utils.event_wrapper(
True, step=utils.start_process_event_message("HistoryServer"))
def start_historyserver(instance):
instance.remote().execute_command(
'sudo su - -c "mr-jobhistory-daemon.sh start historyserver" hadoop')
@cpo.event_wrapper(True, step=pu.start_process_event_message("Oozie"))
@utils.event_wrapper(True, step=utils.start_process_event_message("Oozie"))
def start_oozie_process(pctx, instance):
with context.set_current_instance_id(instance.instance_id):
with instance.remote() as r:
if c_helper.is_mysql_enabled(pctx, instance.cluster):
if config_helper.is_mysql_enabled(pctx, instance.cluster):
_start_mysql(r)
LOG.debug("Creating Oozie DB Schema")
sql_script = files.get_file_text(
'plugins/vanilla/hadoop2/resources/create_oozie_db.sql')
sql_script = utils.get_file_text(
'plugins/vanilla/hadoop2/resources/create_oozie_db.sql',
'sahara_plugin_vanilla')
password = oozie_helper.get_oozie_mysql_configs(
instance.cluster)[
@@ -105,10 +104,10 @@ def start_oozie_process(pctx, instance):
_start_oozie(r)
@cpo.event_wrapper(
True, step=pu.start_process_event_message("Spark History Server"))
@utils.event_wrapper(
True, step=utils.start_process_event_message("Spark History Server"))
def start_spark_history_server(master):
sp_home = c_helper.get_spark_home(master.cluster)
sp_home = config_helper.get_spark_home(master.cluster)
with context.set_current_instance_id(master.instance_id):
with master.remote() as r:
r.execute_command('sudo su - -c "bash %s" hadoop' % os.path.join(
@@ -116,12 +115,12 @@ def start_spark_history_server(master):
def start_zk_server(instances):
cpo.add_provisioning_step(
utils.add_provisioning_step(
instances[0].cluster_id,
pu.start_process_event_message("ZooKeeper"),
utils.start_process_event_message("ZooKeeper"),
len(instances))
with context.ThreadGroup() as tg:
with context.PluginsThreadGroup() as tg:
for instance in instances:
with context.set_current_instance_id(instance.instance_id):
tg.spawn('ZK-start-processes-%s' % instance.instance_name,
@@ -135,19 +134,19 @@ def refresh_zk_servers(cluster, to_delete_instances=None):
if instance in instances:
instances.remove(instance)
cpo.add_provisioning_step(
utils.add_provisioning_step(
cluster.id,
pu.start_process_event_message("ZooKeeper"),
utils.start_process_event_message("ZooKeeper"),
len(instances))
with context.ThreadGroup() as tg:
with context.PluginsThreadGroup() as tg:
for instance in instances:
with context.set_current_instance_id(instance.instance_id):
tg.spawn('ZK-restart-processes-%s' % instance.instance_name,
_start_zk_processes, instance, 'restart')
@cpo.event_wrapper(True)
@utils.event_wrapper(True)
def _start_zk_processes(instance, operation):
with instance.remote() as r:
r.execute_command(
@@ -160,15 +159,16 @@ def format_namenode(instance):
'sudo su - -c "hdfs namenode -format" hadoop')
@cpo.event_wrapper(
True, step=pu.start_process_event_message("Oozie"), param=('cluster', 0))
@utils.event_wrapper(
True,
step=utils.start_process_event_message("Oozie"), param=('cluster', 0))
def refresh_hadoop_nodes(cluster):
nn = vu.get_namenode(cluster)
nn.remote().execute_command(
'sudo su - -c "hdfs dfsadmin -refreshNodes" hadoop')
@cpo.event_wrapper(
@utils.event_wrapper(
True, step=_("Refresh %s nodes") % "YARN", param=('cluster', 0))
def refresh_yarn_nodes(cluster):
rm = vu.get_resourcemanager(cluster)
@@ -210,7 +210,7 @@ def _start_oozie(remote):
'sudo su - -c "/opt/oozie/bin/oozied.sh start" hadoop')
@cpo.event_wrapper(
@utils.event_wrapper(
True, step=_("Await %s start up") % "DataNodes", param=('cluster', 0))
def await_datanodes(cluster):
datanodes_count = len(vu.get_datanodes(cluster))
@@ -219,9 +219,9 @@ def await_datanodes(cluster):
l_message = _("Waiting on %s datanodes to start up") % datanodes_count
with vu.get_namenode(cluster).remote() as r:
poll_utils.plugin_option_poll(
utils.plugin_option_poll(
cluster, _check_datanodes_count,
c_helper.DATANODES_STARTUP_TIMEOUT, l_message, 1, {
config_helper.DATANODES_STARTUP_TIMEOUT, l_message, 1, {
'remote': r, 'count': datanodes_count})
@@ -265,7 +265,8 @@ def _hive_metastore_start(remote):
" --service metastore > /dev/null &' hadoop")
@cpo.event_wrapper(True, step=pu.start_process_event_message("HiveServer"))
@utils.event_wrapper(
True, step=utils.start_process_event_message("HiveServer"))
def start_hiveserver_process(pctx, instance):
with context.set_current_instance_id(instance.instance_id):
with instance.remote() as r:
@@ -273,16 +274,15 @@ def start_hiveserver_process(pctx, instance):
_hive_copy_shared_conf(
r, edp.get_hive_shared_conf_path('hadoop'))
if c_helper.is_mysql_enabled(pctx, instance.cluster):
if config_helper.is_mysql_enabled(pctx, instance.cluster):
oozie = vu.get_oozie(instance.node_group.cluster)
if not oozie or instance.hostname() != oozie.hostname():
_start_mysql(r)
version = instance.cluster.hadoop_version
sql_script = files.get_file_text(
sql_script = utils.get_file_text(
'plugins/vanilla/v{}/resources/create_hive_db.sql'.format(
version.replace('.', '_'))
)
version.replace('.', '_')), 'sahara_plugin_vanilla')
sql_script = sql_script.replace(
'{{password}}', u.get_hive_password(instance.cluster))

46
sahara/plugins/vanilla/hadoop2/scaling.py → sahara_plugin_vanilla/plugins/vanilla/hadoop2/scaling.py

@@ -13,17 +13,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.i18n import _
from sahara.plugins import utils as u
from sahara.plugins.vanilla.hadoop2 import config
from sahara.plugins.vanilla.hadoop2 import config_helper as c_helper
from sahara.plugins.vanilla.hadoop2 import run_scripts as run
from sahara.plugins.vanilla.hadoop2 import utils as pu
from sahara.plugins.vanilla import utils as vu
from sahara.swift import swift_helper
from sahara.utils import cluster_progress_ops as cpo
from sahara.utils import poll_utils
from sahara.plugins import swift_helper
from sahara.plugins import utils
from sahara_plugin_vanilla.i18n import _
from sahara_plugin_vanilla.plugins.vanilla.hadoop2 import config
from sahara_plugin_vanilla.plugins.vanilla.hadoop2 import config_helper
from sahara_plugin_vanilla.plugins.vanilla.hadoop2 import run_scripts as run
from sahara_plugin_vanilla.plugins.vanilla.hadoop2 import utils as pu
from sahara_plugin_vanilla.plugins.vanilla import utils as vu
HADOOP_CONF_DIR = config.HADOOP_CONF_DIR
@@ -48,19 +45,20 @@ def _get_instances_with_service(instances, service):
if service in instance.node_group.node_processes]
@cpo.event_wrapper(True, step=_("Update include files"), param=('cluster', 0))
@utils.event_wrapper(
True, step=_("Update include files"), param=('cluster', 0))
def _update_include_files(cluster, dec_instances=None):
dec_instances = dec_instances or []
dec_instances_ids = [instance.id for instance in dec_instances]
instances = u.get_instances(cluster)
instances = utils.get_instances(cluster)
inst_filter = lambda inst: inst.id not in dec_instances_ids
datanodes = filter(inst_filter, vu.get_datanodes(cluster))
nodemanagers = filter(inst_filter, vu.get_nodemanagers(cluster))
dn_hosts = u.generate_fqdn_host_names(datanodes)
nm_hosts = u.generate_fqdn_host_names(nodemanagers)
dn_hosts = utils.generate_fqdn_host_names(datanodes)
nm_hosts = utils.generate_fqdn_host_names(nodemanagers)
for instance in instances:
with instance.remote() as r:
r.execute_command(
@@ -97,9 +95,9 @@ def decommission_nodes(pctx, cluster, instances):
def _update_exclude_files(cluster, instances):
datanodes = _get_instances_with_service(instances, 'datanode')
nodemanagers = _get_instances_with_service(instances, 'nodemanager')
dn_hosts = u.generate_fqdn_host_names(datanodes)
nm_hosts = u.generate_fqdn_host_names(nodemanagers)
for instance in u.get_instances(cluster):
dn_hosts = utils.generate_fqdn_host_names(datanodes)
nm_hosts = utils.generate_fqdn_host_names(nodemanagers)
for instance in utils.get_instances(cluster):
with instance.remote() as r:
r.execute_command(
'sudo su - -c "echo \'%s\' > %s/dn-exclude" hadoop' % (
@@ -110,7 +108,7 @@ def _update_exclude_files(cluster, instances):
def _clear_exclude_files(cluster):
for instance in u.get_instances(cluster):
for instance in utils.get_instances(cluster):
with instance.remote() as r:
r.execute_command(
'sudo su - -c "echo > %s/dn-exclude" hadoop' % HADOOP_CONF_DIR)
@@ -127,21 +125,21 @@ def is_decommissioned(cluster, check_func, instances):
def _check_decommission(cluster, instances, check_func, option):
poll_utils.plugin_option_poll(
utils.plugin_option_poll(
cluster, is_decommissioned, option, _("Wait for decommissioning"),
5, {'cluster': cluster, 'check_func': check_func,
'instances': instances})
@cpo.event_wrapper(
@utils.event_wrapper(
True, step=_("Decommission %s") % "NodeManagers", param=('cluster', 0))
def _check_nodemanagers_decommission(cluster, instances):
_check_decommission(cluster, instances, pu.get_nodemanagers_status,
c_helper.NODEMANAGERS_DECOMMISSIONING_TIMEOUT)
config_helper.NODEMANAGERS_DECOMMISSIONING_TIMEOUT)
@cpo.event_wrapper(
@utils.event_wrapper(
True, step=_("Decommission %s") % "DataNodes", param=('cluster', 0))
def _check_datanodes_decommission(cluster, instances):
_check_decommission(cluster, instances, pu.get_datanodes_status,
c_helper.DATANODES_DECOMMISSIONING_TIMEOUT)
config_helper.DATANODES_DECOMMISSIONING_TIMEOUT)

11
sahara/plugins/vanilla/hadoop2/starting_scripts.py → sahara_plugin_vanilla/plugins/vanilla/hadoop2/starting_scripts.py

@@ -14,9 +14,8 @@
# limitations under the License.
from sahara.plugins import utils
from sahara.plugins.vanilla.hadoop2 import run_scripts as run
from sahara.plugins.vanilla import utils as vu
from sahara.utils import cluster_progress_ops as cpo
from sahara_plugin_vanilla.plugins.vanilla.hadoop2 import run_scripts as run
from sahara_plugin_vanilla.plugins.vanilla import utils as vu
def start_namenode(cluster):
@@ -24,7 +23,7 @@ def start_namenode(cluster):
_start_namenode(nn)
@cpo.event_wrapper(
@utils.event_wrapper(
True, step=utils.start_process_event_message('NameNode'))
def _start_namenode(nn):
run.format_namenode(nn)
@@ -37,7 +36,7 @@ def start_secondarynamenode(cluster):
_start_secondarynamenode(snn)
@cpo.event_wrapper(
@utils.event_wrapper(
True, step=utils.start_process_event_message("SecondaryNameNodes"))
def _start_secondarynamenode(snn):
run.start_hadoop_process(snn, 'secondarynamenode')
@@ -49,7 +48,7 @@ def start_resourcemanager(cluster):
_start_resourcemanager(rm)
@cpo.event_wrapper(
@utils.event_wrapper(
True, step=utils.start_process_event_message('ResourceManager'))
def _start_resourcemanager(snn):
run.start_yarn_process(snn, 'resourcemanager')

9
sahara/plugins/vanilla/hadoop2/utils.py → sahara_plugin_vanilla/plugins/vanilla/hadoop2/utils.py

@@ -17,12 +17,11 @@ import re
from oslo_log import log as logging
from sahara import conductor as cond
from sahara import context
from sahara.plugins.vanilla import utils as u
from sahara.service.castellan import utils as castellan
from sahara.plugins import castellan_utils as castellan
from sahara.plugins import conductor
from sahara.plugins import context
from sahara_plugin_vanilla.plugins.vanilla import utils as u
conductor = cond.API
LOG = logging.getLogger(__name__)

11
sahara/plugins/vanilla/hadoop2/validation.py → sahara_plugin_vanilla/plugins/vanilla/hadoop2/validation.py

@@ -13,12 +13,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.i18n import _
from sahara.plugins import exceptions as ex
from sahara.plugins import utils as u
from sahara.plugins.vanilla.hadoop2 import config_helper as cu
from sahara.plugins.vanilla import utils as vu
from sahara.utils import general as gu
from sahara_plugin_vanilla.i18n import _
from sahara_plugin_vanilla.plugins.vanilla.hadoop2 import config_helper as cu
from sahara_plugin_vanilla.plugins.vanilla import utils as vu
def validate_cluster_creating(pctx, cluster):
@@ -95,7 +94,7 @@ def validate_additional_ng_scaling(cluster, additional):
scalable_processes = _get_scalable_processes()
for ng_id in additional:
ng = gu.get_by_id(cluster.node_groups, ng_id)
ng = u.get_by_id(cluster.node_groups, ng_id)
if not set(ng.node_processes).issubset(scalable_processes):
msg = _("Vanilla plugin cannot scale nodegroup with processes: %s")
raise ex.NodeGroupCannotBeScaled(ng.name,
@@ -140,7 +139,7 @@ def validate_zookeeper_node_count(zk_ng, existing, additional):
zk_amount += ng.count
for ng_id in additional:
ng = gu.get_by_id(zk_ng, ng_id)
ng = u.get_by_id(zk_ng, ng_id)
if "zookeeper" in ng.node_processes:
zk_amount += ng.count

4
sahara/plugins/vanilla/plugin.py → sahara_plugin_vanilla/plugins/vanilla/plugin.py

@@ -15,9 +15,9 @@
import copy
from sahara.i18n import _
from sahara.plugins import provisioning as p
from sahara.plugins.vanilla import versionfactory as vhf
from sahara_plugin_vanilla.i18n import _
from sahara_plugin_vanilla.plugins.vanilla import versionfactory as vhf
class VanillaProvider(p.ProvisioningPluginBase):

2
sahara/plugins/vanilla/utils.py → sahara_plugin_vanilla/plugins/vanilla/utils.py

@@ -16,8 +16,8 @@
from oslo_utils import uuidutils
from sahara.plugins import castellan_utils as castellan
from sahara.plugins import utils as u
from sahara.service.castellan import utils as castellan
def get_namenode(cluster):

0
sahara/plugins/vanilla/v2_7_1/__init__.py → sahara_plugin_vanilla/plugins/vanilla/v2_7_1/__init__.py

44
sahara/plugins/vanilla/v2_7_1/config_helper.py → sahara_plugin_vanilla/plugins/vanilla/v2_7_1/config_helper.py

@@ -19,34 +19,40 @@ from oslo_config import cfg
import six
from sahara.plugins import provisioning as p
from sahara.plugins.vanilla.hadoop2 import config_helper as c_helper
from sahara.utils import xmlutils as x
from sahara.plugins import utils
from sahara_plugin_vanilla.plugins.vanilla.hadoop2 import config_helper
CONF = cfg.CONF
CONF.import_opt("enable_data_locality", "sahara.topology.topology_helper")
CORE_DEFAULT = x.load_hadoop_xml_defaults(
'plugins/vanilla/v2_7_1/resources/core-default.xml')
CORE_DEFAULT = utils.load_hadoop_xml_defaults(
'plugins/vanilla/v2_7_1/resources/core-default.xml',
'sahara_plugin_vanilla')
HDFS_DEFAULT = x.load_hadoop_xml_defaults(
'plugins/vanilla/v2_7_1/resources/hdfs-default.xml')
HDFS_DEFAULT = utils.load_hadoop_xml_defaults(
'plugins/vanilla/v2_7_1/resources/hdfs-default.xml',
'sahara_plugin_vanilla')
MAPRED_DEFAULT = x.load_hadoop_xml_defaults(
'plugins/vanilla/v2_7_1/resources/mapred-default.xml')
MAPRED_DEFAULT = utils.load_hadoop_xml_defaults(
'plugins/vanilla/v2_7_1/resources/mapred-default.xml',
'sahara_plugin_vanilla')
YARN_DEFAULT = x.load_hadoop_xml_defaults(
'plugins/vanilla/v2_7_1/resources/yarn-default.xml')
YARN_DEFAULT = utils.load_hadoop_xml_defaults(
'plugins/vanilla/v2_7_1/resources/yarn-default.xml',
'sahara_plugin_vanilla')
OOZIE_DEFAULT = x.load_hadoop_xml_defaults(
'plugins/vanilla/v2_7_1/resources/oozie-default.xml')
OOZIE_DEFAULT = utils.load_hadoop_xml_defaults(
'plugins/vanilla/v2_7_1/resources/oozie-default.xml',
'sahara_plugin_vanilla')
HIVE_DEFAULT = x.load_hadoop_xml_defaults(
'plugins/vanilla/v2_7_1/resources/hive-default.xml')
HIVE_DEFAULT = utils.load_hadoop_xml_defaults(
'plugins/vanilla/v2_7_1/resources/hive-default.xml',
'sahara_plugin_vanilla')
_default_executor_classpath = ":".join(
['/opt/hadoop/share/hadoop/tools/lib/hadoop-openstack-2.7.1.jar'])
SPARK_CONFS = copy.deepcopy(c_helper.SPARK_CONFS)
SPARK_CONFS = copy.deepcopy(config_helper.SPARK_CONFS)
SPARK_CONFS['Spark']['OPTIONS'].append(
{
@@ -87,15 +93,15 @@ ENV_CONFS = {
}
# Initialise plugin Hadoop configurations
PLUGIN_XML_CONFIGS = c_helper.init_xml_configs(XML_CONFS)
PLUGIN_ENV_CONFIGS = c_helper.init_env_configs(ENV_CONFS)
PLUGIN_XML_CONFIGS = config_helper.init_xml_configs(XML_CONFS)
PLUGIN_ENV_CONFIGS = config_helper.init_env_configs(ENV_CONFS)
def _init_all_configs():
configs = []
configs.extend(PLUGIN_XML_CONFIGS)
configs.extend(PLUGIN_ENV_CONFIGS)
configs.extend(c_helper.PLUGIN_GENERAL_CONFIGS)
configs.extend(config_helper.PLUGIN_GENERAL_CONFIGS)
configs.extend(_get_spark_configs())
configs.extend(_get_zookeeper_configs())
return configs
@@ -124,7 +130,7 @@ def _get_spark_configs():
def _get_zookeeper_configs():
zk_configs = []
for service, config_items in six.iteritems(c_helper.ZOOKEEPER_CONFS):
for service, config_items in six.iteritems(config_helper.ZOOKEEPER_CONFS):
for item in config_items['OPTIONS']:
cfg = p.Config(name=item["name"],
description=item["description"],

25
sahara/plugins/vanilla/v2_7_1/edp_engine.py → sahara_plugin_vanilla/plugins/vanilla/v2_7_1/edp_engine.py

@@ -14,34 +14,33 @@
# limitations under the License.
import os
from sahara import exceptions as ex
from sahara.i18n import _
from sahara.plugins import edp
from sahara.plugins import exceptions as ex
from sahara.plugins import utils as plugin_utils
from sahara.plugins.vanilla import confighints_helper as ch_helper
from sahara.plugins.vanilla.hadoop2 import edp_engine
from sahara.plugins.vanilla import utils as v_utils
from sahara.service.edp.spark import engine as edp_spark_engine
from sahara.utils import edp
from sahara_plugin_vanilla.i18n import _
from sahara_plugin_vanilla.plugins.vanilla import confighints_helper as chh
from sahara_plugin_vanilla.plugins.vanilla.hadoop2 import edp_engine
from sahara_plugin_vanilla.plugins.vanilla import utils as v_utils
class EdpOozieEngine(edp_engine.EdpOozieEngine):
@staticmethod
def get_possible_job_config(job_type):
if edp.compare_job_type(job_type, edp.JOB_TYPE_HIVE):
return {'job_config': ch_helper.get_possible_hive_config_from(
return {'job_config': chh.get_possible_hive_config_from(
'plugins/vanilla/v2_7_1/resources/hive-default.xml')}
if edp.compare_job_type(job_type,
edp.JOB_TYPE_MAPREDUCE,
edp.JOB_TYPE_MAPREDUCE_STREAMING):
return {'job_config': ch_helper.get_possible_mapreduce_config_from(
return {'job_config': chh.get_possible_mapreduce_config_from(
'plugins/vanilla/v2_7_1/resources/mapred-default.xml')}
if edp.compare_job_type(job_type, edp.JOB_TYPE_PIG):
return {'job_config': ch_helper.get_possible_pig_config_from(
return {'job_config': chh.get_possible_pig_config_from(
'plugins/vanilla/v2_7_1/resources/mapred-default.xml')}
return edp_engine.EdpOozieEngine.get_possible_job_config(job_type)
class EdpSparkEngine(edp_spark_engine.SparkJobEngine):
class EdpSparkEngine(edp.PluginsSparkJobEngine):
edp_base_version = "2.7.1"
@@ -68,13 +67,13 @@ class EdpSparkEngine(edp_spark_engine.SparkJobEngine):
@staticmethod
def job_type_supported(job_type):
return (job_type in
edp_spark_engine.SparkJobEngine.get_supported_job_types())
edp.PluginsSparkJobEngine.get_supported_job_types())
def validate_job_execution(self, cluster, job, data):
if (not self.edp_supported(cluster.hadoop_version) or
not v_utils.get_spark_history_server(cluster)):
raise ex.InvalidDataException(
raise ex.PluginInvalidDataException(
_('Spark {base} or higher required to run {type} jobs').format(
base=EdpSparkEngine.edp_base_version, type=job.type))

0
sahara/plugins/vanilla/v2_7_1/resources/README.rst → sahara_plugin_vanilla/plugins/vanilla/v2_7_1/resources/README.rst

0
sahara/plugins/vanilla/v2_7_1/resources/core-default.xml → sahara_plugin_vanilla/plugins/vanilla/v2_7_1/resources/core-default.xml

0
sahara/plugins/vanilla/v2_7_1/resources/create_hive_db.sql → sahara_plugin_vanilla/plugins/vanilla/v2_7_1/resources/create_hive_db.sql

0
sahara/plugins/vanilla/v2_7_1/resources/hdfs-default.xml → sahara_plugin_vanilla/plugins/vanilla/v2_7_1/resources/hdfs-default.xml

0
sahara/plugins/vanilla/v2_7_1/resources/hive-default.xml → sahara_plugin_vanilla/plugins/vanilla/v2_7_1/resources/hive-default.xml

0
sahara/plugins/vanilla/v2_7_1/resources/mapred-default.xml → sahara_plugin_vanilla/plugins/vanilla/v2_7_1/resources/mapred-default.xml

0
sahara/plugins/vanilla/v2_7_1/resources/oozie-default.xml → sahara_plugin_vanilla/plugins/vanilla/v2_7_1/resources/oozie-default.xml

0
sahara/plugins/vanilla/v2_7_1/resources/yarn-default.xml → sahara_plugin_vanilla/plugins/vanilla/v2_7_1/resources/yarn-default.xml

62
sahara/plugins/vanilla/v2_7_5/versionhandler.py → sahara_plugin_vanilla/plugins/vanilla/v2_7_1/versionhandler.py

@@ -15,34 +15,32 @@
from oslo_config import cfg
from sahara import conductor
from sahara import context
from sahara.plugins import conductor
from sahara.plugins import context
from sahara.plugins import swift_helper
from sahara.plugins import utils
from sahara.plugins.vanilla import abstractversionhandler as avm
from sahara.plugins.vanilla.hadoop2 import config as c
from sahara.plugins.vanilla.hadoop2 import keypairs
from sahara.plugins.vanilla.hadoop2 import recommendations_utils as ru
from sahara.plugins.vanilla.hadoop2 import run_scripts as run
from sahara.plugins.vanilla.hadoop2 import scaling as sc
from sahara.plugins.vanilla.hadoop2 import starting_scripts as s_scripts
from sahara.plugins.vanilla.hadoop2 import utils as u
from sahara.plugins.vanilla.hadoop2 import validation as vl
from sahara.plugins.vanilla import utils as vu
from sahara.plugins.vanilla.v2_7_5 import config_helper as c_helper
from sahara.plugins.vanilla.v2_7_5 import edp_engine
from sahara.swift import swift_helper
from sahara.utils import cluster as cluster_utils
conductor = conductor.API
from sahara_plugin_vanilla.plugins.vanilla import abstractversionhandler as avm
from sahara_plugin_vanilla.plugins.vanilla.hadoop2 import config as c
from sahara_plugin_vanilla.plugins.vanilla.hadoop2 import keypairs
from sahara_plugin_vanilla.plugins.vanilla.hadoop2 import recommendations_utils
from sahara_plugin_vanilla.plugins.vanilla.hadoop2 import run_scripts as run
from sahara_plugin_vanilla.plugins.vanilla.hadoop2 import scaling as sc
from sahara_plugin_vanilla.plugins.vanilla.hadoop2 import starting_scripts
from sahara_plugin_vanilla.plugins.vanilla.hadoop2 import utils as u
from sahara_plugin_vanilla.plugins.vanilla.hadoop2 import validation as vl
from sahara_plugin_vanilla.plugins.vanilla import utils as vu
from sahara_plugin_vanilla.plugins.vanilla.v2_7_1 import config_helper