Delete oslo-incubator code & switch to oslo.service

Change-Id: Ie0e70d3f603d61e8c7ad95b7c548008776c1af51
Zhijiang Hu 2016-10-08 06:46:07 -04:00
parent 4981d083c4
commit 94d8230a9e
28 changed files with 14 additions and 4851 deletions

@@ -1,348 +0,0 @@
Aaron Rosen <aaronorosen@gmail.com>
Abhijeet Malawade <Abhijeet.Malawade@nttdata.com>
Abhishek Kekane <abhishek.kekane@nttdata.com>
Adam Gandelman <adam.gandelman@canonical.com>
Adam Gandelman <adamg@ubuntu.com>
Alberto Planas <aplanas@gmail.com>
Alessandro Pilotti <ap@pilotti.it>
Alessio Ababilov <aababilo@yahoo-inc.com>
Alessio Ababilov <aababilov@griddynamics.com>
Alex Gaynor <alex.gaynor@gmail.com>
Alex Meade <mr.alex.meade@gmail.com>
Alexander Gordeev <agordeev@mirantis.com>
Alexander Tivelkov <ativelkov@mirantis.com>
Amala Basha <princessbasha@gmail.com>
AmalaBasha <amala.alungal@RACKSPACE.COM>
AmalaBasha <princessbasha@gmail.com>
Anastasia Vlaskina <alatynskaya@mirantis.com>
Andreas Jaeger <aj@suse.de>
Andrew Hutchings <andrew@linuxjedi.co.uk>
Andrew Melton <andrew.melton@rackspace.com>
Andrew Tranquada <andrew.tranquada@rackspace.com>
Andrey Brindeyev <abrindeyev@griddynamics.com>
Andy McCrae <andy.mccrae@gmail.com>
Anita Kuno <akuno@lavabit.com>
Arnaud Legendre <arnaudleg@gmail.com>
Artur Svechnikov <asvechnikov@mirantis.com>
Ashish Jain <ashish.jain14@wipro.com>
Ashwini Shukla <ashwini.shukla@rackspace.com>
Aswad Rangnekar <aswad.rangnekar@nttdata.com>
Attila Fazekas <afazekas@redhat.com>
Avinash Prasad <avinash.prasad@nttdata.com>
Balazs Gibizer <balazs.gibizer@ericsson.com>
Bartosz Fic <bartosz.fic@intel.com>
Ben Nemec <bnemec@us.ibm.com>
Ben Roble <ben.roble@rackspace.com>
Bernhard M. Wiedemann <bwiedemann@suse.de>
Bhuvan Arumugam <bhuvan@apache.org>
Boris Pavlovic <boris@pavlovic.me>
Brant Knudson <bknudson@us.ibm.com>
Brian Cline <bcline@softlayer.com>
Brian D. Elliott <bdelliott@gmail.com>
Brian Elliott <bdelliott@gmail.com>
Brian Elliott <brian.elliott@rackspace.com>
Brian Lamar <brian.lamar@rackspace.com>
Brian Rosmaita <brian.rosmaita@rackspace.com>
Brian Waldon <brian.waldon@rackspace.com>
Cerberus <matt.dietz@rackspace.com>
Chang Bo Guo <guochbo@cn.ibm.com>
ChangBo Guo(gcb) <eric.guo@easystack.cn>
Chmouel Boudjnah <chmouel@chmouel.com>
Chris Allnutt <chris.allnutt@rackspace.com>
Chris Behrens <cbehrens@codestud.com>
Chris Buccella <buccella@linux.vnet.ibm.com>
Chris Buccella <chris.buccella@antallagon.com>
Chris Fattarsi <chris.fattarsi@pistoncloud.com>
Christian Berendt <berendt@b1-systems.de>
Christopher MacGown <chris@pistoncloud.com>
Chuck Short <chuck.short@canonical.com>
Cindy Pallares <cindy.pallaresq@gmail.com>
Clark Boylan <clark.boylan@gmail.com>
Cory Wright <corywright@gmail.com>
Dan Prince <dprince@redhat.com>
Danny Al-Gaaf <danny.al-gaaf@bisect.de>
Davanum Srinivas <davanum@gmail.com>
Davanum Srinivas <dims@linux.vnet.ibm.com>
Dave Chen <wei.d.chen@intel.com>
Dave Walker (Daviey) <email@daviey.com>
David Koo <david.koo@huawei.com>
David Peraza <dperaza@linux.vnet.ibm.com>
David Ripton <dripton@redhat.com>
Dean Troyer <dtroyer@gmail.com>
DennyZhang <denny@unitedstack.com>
Derek Higgins <derekh@redhat.com>
Dirk Mueller <dirk@dmllr.de>
Dmitry Kulishenko <dmitryk@yahoo-inc.com>
Dolph Mathews <dolph.mathews@gmail.com>
Donal Lafferty <donal.lafferty@citrix.com>
Doron Chen <cdoron@il.ibm.com>
Doug Hellmann <doug.hellmann@dreamhost.com>
Doug Hellmann <doug@doughellmann.com>
Duncan McGreggor <duncan@dreamhost.com>
Eddie Sheffield <eddie.sheffield@rackspace.com>
Edward Hope-Morley <edward.hope-morley@canonical.com>
Eldar Nugaev <enugaev@griddynamics.com>
Elena Ezhova <eezhova@mirantis.com>
Eoghan Glynn <eglynn@redhat.com>
Eric Brown <browne@vmware.com>
Eric Windisch <eric@cloudscaling.com>
Erno Kuvaja <jokke@hp.com>
Eugeniya Kudryashova <ekudryashova@mirantis.com>
Ewan Mellor <ewan.mellor@citrix.com>
Fabio M. Di Nitto <fdinitto@redhat.com>
Fei Long Wang <flwang@catalyst.net.nz>
Fei Long Wang <flwang@cn.ibm.com>
Fengqian Gao <fengqian.gao@intel.com>
Flaper Fesp <flaper87@gmail.com>
Flavio Percoco <flaper87@gmail.com>
Florent Flament <florent.flament-ext@cloudwatt.com>
Gabriel Hurley <gabriel@strikeawe.com>
Gauvain Pocentek <gauvain.pocentek@objectif-libre.com>
Geetika Batra <geetika791@gmail.com>
George Peristerakis <george.peristerakis@enovance.com>
Georgy Okrokvertskhov <gokrokvertskhov@mirantis.com>
Gerardo Porras <gporras@yahoo-inc.com>
Gorka Eguileor <geguileo@redhat.com>
Grant Murphy <grant.murphy@hp.com>
Haiwei Xu <xu-haiwei@mxw.nes.nec.co.jp>
He Yongli <yongli.he@intel.com>
Hemanth Makkapati <hemanth.makkapati@mailtrust.com>
Hemanth Makkapati <hemanth.makkapati@rackspace.com>
Hengqing Hu <hudayou@hotmail.com>
Hirofumi Ichihara <ichihara.hirofumi@lab.ntt.co.jp>
Hui Xiang <hui.xiang@canonical.com>
Ian Cordasco <ian.cordasco@rackspace.com>
Iccha Sethi <iccha.sethi@rackspace.com>
Igor A. Lukyanenkov <ilukyanenkov@griddynamics.com>
Ihar Hrachyshka <ihrachys@redhat.com>
Ildiko Vancsa <ildiko.vancsa@ericsson.com>
Ilya Pekelny <ipekelny@mirantis.com>
Inessa Vasilevskaya <ivasilevskaya@mirantis.com>
Ionuț Arțăriși <iartarisi@suse.cz>
Isaku Yamahata <yamahata@valinux.co.jp>
J. Daniel Schmidt <jdsn@suse.de>
Jakub Ruzicka <jruzicka@redhat.com>
James Carey <jecarey@us.ibm.com>
James E. Blair <jeblair@hp.com>
James Li <yueli.m@gmail.com>
James Morgan <james.morgan@rackspace.com>
James Polley <jp@jamezpolley.com>
Jamie Lennox <jamielennox@redhat.com>
Jared Culp <jared.culp@rackspace.com>
Jasakov Artem <ayasakov@mirantis.com>
Jason Koelker <jason@koelker.net>
Jason Kölker <jason@koelker.net>
Jay Pipes <jaypipes@gmail.com>
Jeremy Stanley <fungi@yuggoth.org>
Jesse Andrews <anotherjesse@gmail.com>
Jesse J. Cook <jesse.cook@rackspace.com>
Jia Dong <jiadong.jia@huawei.com>
Jinwoo 'Joseph' Suh <jsuh@isi.edu>
Joe Gordon <joe.gordon0@gmail.com>
Joe Gordon <jogo@cloudscaling.com>
Johannes Erdfelt <johannes.erdfelt@rackspace.com>
John Bresnahan <jbresnah@redhat.com>
John Lenihan <john.lenihan@hp.com>
John Warren <jswarren@us.ibm.com>
Jon Bernard <jobernar@redhat.com>
Joseph Suh <jsuh@isi.edu>
Josh Durgin <josh.durgin@dreamhost.com>
Josh Durgin <josh.durgin@inktank.com>
Josh Kearney <josh@jk0.org>
Joshua Harlow <harlowja@yahoo-inc.com>
Juan Manuel Olle <juan.m.olle@intel.com>
Juerg Haefliger <juerg.haefliger@hp.com>
Julia Varlamova <jvarlamova@mirantis.com>
Julien Danjou <julien@danjou.info>
Jun Hong Li <junhongl@cn.ibm.com>
Justin Santa Barbara <justin@fathomdb.com>
Justin Shepherd <jshepher@rackspace.com>
KIYOHIRO ADACHI <adachi@mxs.nes.nec.co.jp>
Kamil Rykowski <kamil.rykowski@intel.com>
Kasey Alusi <kasey.alusi@gmail.com>
Ken Pepple <ken.pepple@gmail.com>
Ken Thomas <krt@yahoo-inc.com>
Kent Wang <kent.wang@intel.com>
Keshava Bharadwaj <kb.sankethi@gmail.com>
Kevin L. Mitchell <kevin.mitchell@rackspace.com>
Kui Shi <skuicloud@gmail.com>
Kun Huang <gareth@unitedstack.com>
Lakshmi N Sampath <lakshmi.sampath@hp.com>
Lars Gellrich <lars.gellrich@hp.com>
Leam <leam.hall@mailtrust.com>
Leandro I. Costantino <leandro.i.costantino@intel.com>
Liu Yuan <namei.unix@gmail.com>
Lorin Hochstein <lorin@nimbisservices.com>
Louis Taylor <kragniz@gmail.com>
Louis Taylor <louis@kragniz.eu>
Luis A. Garcia <luis@linux.vnet.ibm.com>
Major Hayden <major@mhtx.net>
Mark J. Washenberger <mark.washenberger@markwash.net>
Mark J. Washenberger <mark.washenberger@rackspace.com>
Mark McLoughlin <markmc@redhat.com>
Mark Washenberger <mark.washenberger@rackspace.com>
Martin Kletzander <mkletzan@redhat.com>
Maru Newby <mnewby@internap.com>
Masashi Ozawa <mozawa@cloudian.com>
Matt Dietz <matt.dietz@rackspace.com>
Matt Fischer <matt@mattfischer.com>
Matt Riedemann <mriedem@us.ibm.com>
Matthew Booth <mbooth@redhat.com>
Matthew Treinish <treinish@linux.vnet.ibm.com>
Matthias Schmitz <matthias@sigxcpu.org>
Maurice Leeflang <maurice@leeflang.net>
Mauro S. M. Rodrigues <maurosr@linux.vnet.ibm.com>
Michael J Fork <mjfork@us.ibm.com>
Michael Still <mikal@stillhq.com>
Michal Dulko <michal.dulko@intel.com>
Mike Fedosin <mfedosin@mirantis.com>
Mike Lundy <mike@pistoncloud.com>
Monty Taylor <mordred@inaugust.com>
Nassim Babaci <nassim.babaci@cloudwatt.com>
Nicholas Kuechler <nkuechler@gmail.com>
Nicolas Simonds <nic@metacloud.com>
Nikhil Komawar <nikhil.komawar@rackspace.com>
Nikhil Komawar <nikhilskomawar@gmail.com>
Nikolaj Starodubtsev <nstarodubtsev@mirantis.com>
Noboru Arai <arai@mxa.nes.nec.co.jp>
Noboru arai <arai@mxa.nes.nec.co.jp>
Oleksii Chuprykov <ochuprykov@mirantis.com>
Olena Logvinova <ologvinova@mirantis.com>
Pamela-Rose Virtucio <virtu006@umn.edu>
Patrick Mezard <patrick@mezard.eu>
Paul Bourke <paul-david.bourke@hp.com>
Paul Bourke <pauldbourke@gmail.com>
Paul McMillan <paul.mcmillan@nebula.com>
Pavan Kumar Sunkara <pavan.sss1991@gmail.com>
Pawel Koniszewski <pawel.koniszewski@intel.com>
Pawel Skowron <pawel.skowron@intel.com>
Peng Yong <ppyy@pubyun.com>
Pete Zaitcev <zaitcev@kotori.zaitcev.us>
Pranali Deore <pranali11.deore@nttdata.com>
PranaliDeore <pranali11.deore@nttdata.com>
Pádraig Brady <P@draigBrady.com>
Pádraig Brady <pbrady@redhat.com>
Radu <mateescu@ca.ibm.com>
Rainya Mosher <rainya.mosher@rackspace.com>
Rajesh Tailor <rajesh.tailor@nttdata.com>
Ray Chen <oldsharp@163.com>
Reynolds Chin <benzwt@gmail.com>
Rick Clark <rick@openstack.org>
Rick Harris <rconradharris@gmail.com>
Robert Collins <rbtcollins@hp.com>
Rohan Kanade <rkanade@redhat.com>
Roman Bogorodskiy <bogorodskiy@gmail.com>
Roman Bogorodskiy <rbogorodskiy@mirantis.com>
Roman Vasilets <rvasilets@mirantis.com>
Rongze Zhu <zrzhit@gmail.com>
RongzeZhu <zrzhit@gmail.com>
Russell Bryant <rbryant@redhat.com>
Russell Sim <russell.sim@gmail.com>
Sabari Kumar Murugesan <smurugesan@vmware.com>
Sam Morrison <sorrison@gmail.com>
Sam Stavinoha <smlstvnh@gmail.com>
Samuel Merritt <sam@swiftstack.com>
Sascha Peilicke <saschpe@gmx.de>
Sascha Peilicke <saschpe@suse.de>
Sathish Nagappan <sathish.nagappan@nebula.com>
Sean Dague <sdague@linux.vnet.ibm.com>
Sean Dague <sean@dague.net>
Sergey Nikitin <snikitin@mirantis.com>
Sergey Skripnick <sskripnick@mirantis.com>
Sergey Vilgelm <svilgelm@mirantis.com>
Sergio Cazzolato <sergio.j.cazzolato@intel.com>
Shane Wang <shane.wang@intel.com>
Soren Hansen <soren.hansen@rackspace.com>
Stan Lagun <slagun@mirantis.com>
Steve Kowalik <steven@wedontsleep.org>
Steve Lewis <stevelle@gmail.com>
Stuart McLaren <stuart.mclaren@hp.com>
Sulochan Acharya <sulochan.acharya@rackspace.co.uk>
Svetlana Shturm <sshturm@mirantis.com>
Taku Fukushima <tfukushima@dcl.info.waseda.ac.jp>
Tatyana Leontovich <tleontov@yahoo-inc.com>
Therese McHale <therese.mchale@hp.com>
Thierry Carrez <thierry@openstack.org>
Thomas Bechtold <tbechtold@suse.com>
Thomas Bechtold <thomasbechtold@jpberlin.de>
Thomas Leaman <thomas.leaman@hp.com>
Tim Daly, Jr <timjr@yahoo-inc.com>
Toan Nguyen <toan.nguyen@rackspace.com>
Tom Hancock <tom.hancock@hp.com>
Tom Leaman <thomas.leaman@hp.com>
Tomas Hancock <tom.hancock@hp.com>
Travis Tripp <travis.tripp@hp.com>
Unmesh Gurjar <unmesh.gurjar@nttdata.com>
Unmesh Gurjar <unmesh.gurjar@vertex.co.in>
Vaibhav Bhatkar <vaibhav.bhatkar@gmail.com>
Venkatesh Sampath <venkatesh.sampath@outlook.com>
Venkatesh Sampath <venkatesh.sampath@rackspace.com>
Victor Morales <victor.morales@intel.com>
Victor Sergeyev <vsergeyev@mirantis.com>
Vincent Untz <vuntz@suse.com>
Vishvananda Ishaya <vishvananda@gmail.com>
Vitaliy Kolosov <vkolosov@griddynamics.com>
Vyacheslav Vakhlyuev <vvakhlyuev@mirantis.com>
Wayne A. Walls <wayne.walls@rackspace.com>
Wayne Okuma <wayne.okuma@hp.com>
Wen Cheng Ma <wenchma@cn.ibm.com>
Wu Wenxiang <wu.wenxiang@99cloud.net>
YAMAMOTO Takashi <yamamoto@valinux.co.jp>
Yaguang Tang <heut2008@gmail.com>
Yanis Guenane <yanis.guenane@enovance.com>
Yufang Zhang <yufang521247@gmail.com>
Yuriy Taraday <yorik.sar@gmail.com>
Yusuke Ide <idzzyzzbi@gmail.com>
ZHANG Hua <zhuadl@cn.ibm.com>
Zhenguo Niu <zhenguo@unitedstack.com>
Zhi Yan Liu <zhiyanl@cn.ibm.com>
ZhiQiang Fan <aji.zqfan@gmail.com>
ZhiQiang Fan <zhiqiang.fan@huawei.com>
Zhiteng Huang <zhiteng.huang@intel.com>
Zhongyue Luo <zhongyue.nah@intel.com>
abhishek-kekane <abhishek.kekane@nttdata.com>
abhishekkekane <abhishek.kekane@nttdata.com>
amalaba <princessbasha@gmail.com>
ankitagrawal <ankit11.agrawal@nttdata.com>
ankur <ankur.gupta@nectechnologies.in>
annegentle <anne@openstack.org>
daisy-ycguo <daisy.ycguo@gmail.com>
eddie-sheffield <eddie.sheffield@rackspace.com>
eos2102 <eos2102@gmail.com>
gengjh <gengjh@cn.ibm.com>
henriquetruta <henrique@lsd.ufcg.edu.br>
huangtianhua <huangtianhua@huawei.com>
hzrandd <82433422@qq.com>
iccha <iccha.sethi@rackspace.com>
iccha-sethi <iccha.sethi@rackspace.com>
iccha.sethi <iccha.sethi@rackspace.com>
isethi <iccha.sethi@rackspace.com>
jakedahn <jake@ansolabs.com>
jare6412 <jared.culp@mailtrust.com>
jaypipes@gmail.com <>
jola-mirecka <jola.mirecka@hp.com>
lawrancejing <lawrancejing@gmail.com>
leseb <sebastien.han@enovance.com>
ling-yun <zengyunling@huawei.com>
liuqing <jing.liuqing@99cloud.net>
liyingjun <liyingjun1988@gmail.com>
liyingjun <yingjun.li@kylin-cloud.com>
lizheming <lizheming.li@huawei.com>
llg8212 <lilinguo@huawei.com>
ls1175 <liusheng@huawei.com>
marianitadn <maria.nita.dn@gmail.com>
mathrock <nathanael.i.burton.work@gmail.com>
nanhai liao <nanhai.liao@kylin-cloud.com>
pran1990 <praneshpg@gmail.com>
ravikumar-venkatesan <ravikumar.venkatesan@hp.com>
sai krishna sripada <krishna1256@gmail.com>
sarvesh-ranjan <saranjan@cisco.com>
shreeduth-awasthi <shreeduth.awasthi@tcs.com>
shrutiranade38 <shrutiranade38@gmail.com>
shu,xinxin <xinxin.shu@intel.com>
sridevik <koushiksridevi8@gmail.com>
sridevik <sridevi.koushik@rackspace.com>
tanlin <lin.tan@intel.com>
tmcpeak <travis_mcpeak@symantec.com>
wanghong <w.wanghong@huawei.com>
yangxurong <yangxurong@huawei.com>

@@ -13,4 +13,4 @@ Pull requests submitted through GitHub will be ignored.
 Bugs should be filed on Launchpad, not GitHub:
-https://bugs.launchpad.net/glance
+https://bugs.launchpad.net/daisycloud-core

File diff suppressed because it is too large.

@@ -1,11 +1,11 @@
-glance Style Commandments
+daisycloud-core Style Commandments
 =======================
 - Step 1: Read the OpenStack Style Commandments
   http://docs.openstack.org/developer/hacking/
 - Step 2: Read on
-glance Specific Commandments
+daisycloud-core Specific Commandments
 --------------------------
 - [G316] Change assertTrue(isinstance(A, B)) by optimal assert like

@@ -1,11 +1,9 @@
 ======
-Glance
+Daisy
 ======
-Glance is a project that defines services for discovering, registering,
-retrieving and storing virtual machine images.
-Use the following resources to learn more:
-* `Official Glance documentation <http://docs.openstack.org/developer/glance/>`_
-* `Official Client documentation <http://docs.openstack.org/developer/python-glanceclient/>`_
+Daisy(Openstack project name: daisycloud-core) provides automated deployment and
+management of OpenStack and other distributed systems.
+## Website
+http://www.daisycloud.org

@@ -37,7 +37,7 @@ from daisy.common import config
 from daisy.common import exception
 from daisy.common import wsgi
 from daisy import notifier
-from daisy.openstack.common import systemd
+from oslo_service import systemd
 # Monkey patch socket, time, select, threads
 eventlet.patcher.monkey_patch(all=False, socket=True, time=True,
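For reference, a minimal sketch of what the swapped import provides: oslo_service.systemd exposes the same notify_once() helper the incubator module did, so the server entry points need only the new import. The main() below is illustrative, not code from this tree.

from oslo_service import systemd

def main():
    # ... parse config and build the WSGI server here ...
    systemd.notify_once()  # send READY=1 once the service can accept requests

if __name__ == '__main__':
    main()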

@@ -28,7 +28,7 @@ from oslo_config import cfg
 from oslo_log import log as logging
 from daisy.common import exception
 from daisy.common import config
-from daisy.openstack.common import loopingcall
+from oslo_service import loopingcall
 from daisy.orchestration import manager
 import six
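A minimal sketch of the fixed-interval API the orchestration code now gets from oslo_service.loopingcall; the polling function and counts are illustrative. Raising LoopingCallDone from the callback stops the loop, and its retvalue becomes the result of wait().

from oslo_service import loopingcall

ticks = {'n': 0}

def _poll():
    ticks['n'] += 1
    if ticks['n'] >= 3:
        raise loopingcall.LoopingCallDone(retvalue=ticks['n'])

timer = loopingcall.FixedIntervalLoopingCall(_poll)
result = timer.start(interval=0.1).wait()  # returns 3 after three runs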

@@ -34,7 +34,7 @@ from daisy.common import config
 from daisy.common import utils
 from daisy.common import wsgi
 from daisy import notifier
-from daisy.openstack.common import systemd
+from oslo_service import systemd
 # Monkey patch socket and time
 eventlet.patcher.monkey_patch(all=False, socket=True, time=True, thread=True)

@@ -1,122 +0,0 @@
# Copyright 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A mixin that validates the given body for jsonpatch-compatibility.
The methods supported are limited to those listed in ALLOWED
"""
import re
import jsonschema
import daisy.common.exception as exc
from daisy.openstack.common._i18n import _
class JsonPatchValidatorMixin(object):
# a list of allowed methods allowed according to RFC 6902
ALLOWED = ["replace", "test", "remove", "add", "copy"]
PATH_REGEX_COMPILED = re.compile("^/[^/]+(/[^/]+)*$")
def __init__(self, methods_allowed=["replace", "remove"]):
self.schema = self._gen_schema(methods_allowed)
self.methods_allowed = [m for m in methods_allowed
if m in self.ALLOWED]
@staticmethod
def _gen_schema(methods_allowed):
"""
Generates a jsonschema for jsonpatch request based on methods_allowed
"""
# op replace needs no 'value' param, so needs a special schema if
# present in methods_allowed
basic_schema = {
"type": "array",
"items": {"properties": {"op": {"type": "string",
"enum": methods_allowed},
"path": {"type": "string"},
"value": {"type": ["string",
"object",
"integer",
"array",
"boolean"]}
},
"required": ["op", "path", "value"],
"type": "object"},
"$schema": "http://json-schema.org/draft-04/schema#"
}
if "remove" in methods_allowed:
methods_allowed.remove("remove")
no_remove_op_schema = {
"type": "object",
"properties": {
"op": {"type": "string", "enum": methods_allowed},
"path": {"type": "string"},
"value": {"type": ["string", "object",
"integer", "array", "boolean"]}
},
"required": ["op", "path", "value"]}
op_remove_only_schema = {
"type": "object",
"properties": {
"op": {"type": "string", "enum": ["remove"]},
"path": {"type": "string"}
},
"required": ["op", "path"]}
basic_schema = {
"type": "array",
"items": {
"oneOf": [no_remove_op_schema, op_remove_only_schema]},
"$schema": "http://json-schema.org/draft-04/schema#"
}
return basic_schema
def validate_body(self, body):
try:
jsonschema.validate(body, self.schema)
# now make sure everything is ok with path
return [{"path": self._decode_json_pointer(e["path"]),
"value": e.get("value", None),
"op": e["op"]} for e in body]
except jsonschema.ValidationError:
raise exc.InvalidJsonPatchBody(body=body, schema=self.schema)
def _check_for_path_errors(self, pointer):
if not re.match(self.PATH_REGEX_COMPILED, pointer):
msg = _("Json path should start with a '/', "
"end with no '/', no 2 subsequent '/' are allowed.")
raise exc.InvalidJsonPatchPath(path=pointer, explanation=msg)
if re.search('~[^01]', pointer) or pointer.endswith('~'):
msg = _("Pointer contains '~' which is not part of"
" a recognized escape sequence [~0, ~1].")
raise exc.InvalidJsonPatchPath(path=pointer, explanation=msg)
def _decode_json_pointer(self, pointer):
"""Parses a json pointer. Returns a pointer as a string.
Json Pointers are defined in
http://tools.ietf.org/html/draft-pbryan-zyp-json-pointer .
The pointers use '/' for separation between object attributes.
A '/' character in an attribute name is encoded as "~1" and
a '~' character is encoded as "~0".
"""
self._check_for_path_errors(pointer)
ret = []
for part in pointer.lstrip('/').split('/'):
ret.append(part.replace('~1', '/').replace('~0', '~').strip())
return '/'.join(ret)
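A hypothetical usage sketch of the mixin deleted above; the subclass name, import path, and request body are illustrative, not from the tree.

from daisy.common import jsonpatchvalidator  # module path assumed

class ImagePatchValidator(jsonpatchvalidator.JsonPatchValidatorMixin):
    def __init__(self):
        super(ImagePatchValidator, self).__init__(
            methods_allowed=["replace", "remove"])

validator = ImagePatchValidator()
changes = validator.validate_body(
    [{"op": "replace", "path": "/name", "value": "new-name"}])
# -> [{"path": "name", "value": "new-name", "op": "replace"}]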

@@ -1,16 +0,0 @@
oslo-incubator
--------------
A number of modules from oslo-incubator are imported into this project.
You can clone the oslo-incubator repository using the following url:
git://git.openstack.org/openstack/oslo-incubator
These modules are "incubating" in oslo-incubator and are kept in sync
with the help of oslo-incubator's update.py script. See:
https://wiki.openstack.org/wiki/Oslo#Syncing_Code_from_Incubator
The copy of the code should never be directly modified here. Please
always update oslo-incubator first and then run the script to copy
the changes across.

@@ -1,45 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""oslo.i18n integration module.
See http://docs.openstack.org/developer/oslo.i18n/usage.html
"""
try:
import oslo_i18n
# NOTE(dhellmann): This reference to o-s-l-o will be replaced by the
# application name when this module is synced into the separate
# repository. It is OK to have more than one translation function
# using the same domain, since there will still only be one message
# catalog.
_translators = oslo_i18n.TranslatorFactory(domain='glance')
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
except ImportError:
# NOTE(dims): Support for cases where a project wants to use
# code from oslo-incubator, but is not ready to be internationalized
# (like tempest)
_ = _LI = _LW = _LE = _LC = lambda x: x
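A short sketch of how consuming code used these translators (the message text is illustrative):

import logging

from daisy.openstack.common._i18n import _, _LI

LOG = logging.getLogger(__name__)
LOG.info(_LI('Service started on port %d'), 8080)  # log-level translator
msg = _('Invalid image reference: %s') % 'abc123'  # user-facing translator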

@@ -1,151 +0,0 @@
# Copyright (c) 2012 OpenStack Foundation.
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import copy
import errno
import gc
import logging
import os
import pprint
import socket
import sys
import traceback
import eventlet.backdoor
import greenlet
from oslo_config import cfg
from daisy.openstack.common._i18n import _LI
help_for_backdoor_port = (
"Acceptable values are 0, <port>, and <start>:<end>, where 0 results "
"in listening on a random tcp port number; <port> results in listening "
"on the specified port number (and not enabling backdoor if that port "
"is in use); and <start>:<end> results in listening on the smallest "
"unused port number within the specified range of port numbers. The "
"chosen port is displayed in the service's log file.")
eventlet_backdoor_opts = [
cfg.StrOpt('backdoor_port',
help="Enable eventlet backdoor. %s" % help_for_backdoor_port)
]
CONF = cfg.CONF
CONF.register_opts(eventlet_backdoor_opts)
LOG = logging.getLogger(__name__)
def list_opts():
"""Entry point for oslo-config-generator.
"""
return [(None, copy.deepcopy(eventlet_backdoor_opts))]
class EventletBackdoorConfigValueError(Exception):
def __init__(self, port_range, help_msg, ex):
msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. '
'%(help)s' %
{'range': port_range, 'ex': ex, 'help': help_msg})
super(EventletBackdoorConfigValueError, self).__init__(msg)
self.port_range = port_range
def _dont_use_this():
print("Don't use this, just disconnect instead")
def _find_objects(t):
return [o for o in gc.get_objects() if isinstance(o, t)]
def _print_greenthreads():
for i, gt in enumerate(_find_objects(greenlet.greenlet)):
print(i, gt)
traceback.print_stack(gt.gr_frame)
print()
def _print_nativethreads():
for threadId, stack in sys._current_frames().items():
print(threadId)
traceback.print_stack(stack)
print()
def _parse_port_range(port_range):
if ':' not in port_range:
start, end = port_range, port_range
else:
start, end = port_range.split(':', 1)
try:
start, end = int(start), int(end)
if end < start:
raise ValueError
return start, end
except ValueError as ex:
raise EventletBackdoorConfigValueError(port_range, ex,
help_for_backdoor_port)
def _listen(host, start_port, end_port, listen_func):
try_port = start_port
while True:
try:
return listen_func((host, try_port))
except socket.error as exc:
if (exc.errno != errno.EADDRINUSE or
try_port >= end_port):
raise
try_port += 1
def initialize_if_enabled():
backdoor_locals = {
'exit': _dont_use_this, # So we don't exit the entire process
'quit': _dont_use_this, # So we don't exit the entire process
'fo': _find_objects,
'pgt': _print_greenthreads,
'pnt': _print_nativethreads,
}
if CONF.backdoor_port is None:
return None
start_port, end_port = _parse_port_range(str(CONF.backdoor_port))
# NOTE(johannes): The standard sys.displayhook will print the value of
# the last expression and set it to __builtin__._, which overwrites
# the __builtin__._ that gettext sets. Let's switch to using pprint
# since it won't interact poorly with gettext, and it's easier to
# read the output too.
def displayhook(val):
if val is not None:
pprint.pprint(val)
sys.displayhook = displayhook
sock = _listen('localhost', start_port, end_port, eventlet.listen)
# In the case of backdoor port being zero, a port number is assigned by
# listen(). In any case, pull the port number out here.
port = sock.getsockname()[1]
LOG.info(
_LI('Eventlet backdoor listening on %(port)s for process %(pid)d') %
{'port': port, 'pid': os.getpid()}
)
eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
locals=backdoor_locals)
return port
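Purely illustrative: how the three accepted backdoor_port forms parse, calling the module-private helper above directly (assumes the module is importable under its incubator path).

from daisy.openstack.common import eventlet_backdoor

print(eventlet_backdoor._parse_port_range("0"))          # (0, 0): random free port
print(eventlet_backdoor._parse_port_range("4444"))       # (4444, 4444): exactly 4444
print(eventlet_backdoor._parse_port_range("8000:8010"))  # (8000, 8010): first free in range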

@@ -1,149 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import errno
import logging
import os
import stat
import tempfile
from oslo_utils import excutils
LOG = logging.getLogger(__name__)
_FILE_CACHE = {}
DEFAULT_MODE = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO
def ensure_tree(path, mode=DEFAULT_MODE):
"""Create a directory (and any ancestor directories required)
:param path: Directory to create
:param mode: Directory creation permissions
"""
try:
os.makedirs(path, mode)
except OSError as exc:
if exc.errno == errno.EEXIST:
if not os.path.isdir(path):
raise
else:
raise
def read_cached_file(filename, force_reload=False):
"""Read from a file if it has been modified.
:param force_reload: Whether to reload the file.
:returns: A tuple with a boolean specifying if the data is fresh
or not.
"""
global _FILE_CACHE
if force_reload:
delete_cached_file(filename)
reloaded = False
mtime = os.path.getmtime(filename)
cache_info = _FILE_CACHE.setdefault(filename, {})
if not cache_info or mtime > cache_info.get('mtime', 0):
LOG.debug("Reloading cached file %s" % filename)
with open(filename) as fap:
cache_info['data'] = fap.read()
cache_info['mtime'] = mtime
reloaded = True
return (reloaded, cache_info['data'])
def delete_cached_file(filename):
"""Delete cached file if present.
:param filename: filename to delete
"""
global _FILE_CACHE
if filename in _FILE_CACHE:
del _FILE_CACHE[filename]
def delete_if_exists(path, remove=os.unlink):
"""Delete a file, but ignore file not found error.
:param path: File to delete
:param remove: Optional function to remove passed path
"""
try:
remove(path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
@contextlib.contextmanager
def remove_path_on_error(path, remove=delete_if_exists):
"""Protect code that wants to operate on PATH atomically.
Any exception will cause PATH to be removed.
:param path: File to work with
:param remove: Optional function to remove passed path
"""
try:
yield
except Exception:
with excutils.save_and_reraise_exception():
remove(path)
def file_open(*args, **kwargs):
"""Open file
see built-in open() documentation for more details
Note: The reason this is kept in a separate module is to easily
be able to provide a stub module that doesn't alter system
state at all (for unit tests)
"""
return open(*args, **kwargs)
def write_to_tempfile(content, path=None, suffix='', prefix='tmp'):
"""Create temporary file or use existing file.
This util is needed for creating temporary file with
specified content, suffix and prefix. If path is not None,
it will be used for writing content. If the path doesn't
exist it'll be created.
:param content: content for temporary file.
:param path: same as parameter 'dir' for mkstemp
:param suffix: same as parameter 'suffix' for mkstemp
:param prefix: same as parameter 'prefix' for mkstemp
For example: it can be used in database tests for creating
configuration files.
"""
if path:
ensure_tree(path)
(fd, path) = tempfile.mkstemp(suffix=suffix, dir=path, prefix=prefix)
try:
os.write(fd, content)
finally:
os.close(fd)
return path
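A small sketch of the cache-aware helpers above (file content is illustrative):

from daisy.openstack.common import fileutils  # module removed by this commit

path = fileutils.write_to_tempfile(b'[DEFAULT]\nverbose = True\n', suffix='.conf')
reloaded, data = fileutils.read_cached_file(path)  # first read: reloaded is True
reloaded, data = fileutils.read_cached_file(path)  # unchanged mtime: reloaded is False
fileutils.delete_if_exists(path)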

@@ -1,45 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Local storage of variables using weak references"""
import threading
import weakref
class WeakLocal(threading.local):
def __getattribute__(self, attr):
rval = super(WeakLocal, self).__getattribute__(attr)
if rval:
# NOTE(mikal): this bit is confusing. What is stored is a weak
# reference, not the value itself. We therefore need to lookup
# the weak reference and return the inner value here.
rval = rval()
return rval
def __setattr__(self, attr, value):
value = weakref.ref(value)
return super(WeakLocal, self).__setattr__(attr, value)
# NOTE(mikal): the name "store" should be deprecated in the future
store = WeakLocal()
# A "weak" store uses weak references and allows an object to fall out of scope
# when it falls out of scope in the code that uses the thread local storage. A
# "strong" store will hold a reference to the object so that it never falls out
# of scope.
weak_store = WeakLocal()
strong_store = threading.local()
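A sketch of the difference between the two stores; immediate collection of the weakly-held object assumes CPython reference counting.

import gc

from daisy.openstack.common import local

class Request(object):
    pass

a, b = Request(), Request()
local.weak_store.request = a       # stored as a weak reference
local.strong_store.request = b     # ordinary thread-local reference
del a, b
gc.collect()
print(local.weak_store.request)    # None: the weakly-held object is gone
print(local.strong_store.request)  # still alive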

@@ -1,147 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import sys
import time
from eventlet import event
from eventlet import greenthread
from daisy.openstack.common._i18n import _LE, _LW
LOG = logging.getLogger(__name__)
# NOTE(zyluo): This lambda function was declared to avoid mocking collisions
# with time.time() called in the standard logging module
# during unittests.
_ts = lambda: time.time()
class LoopingCallDone(Exception):
"""Exception to break out and stop a LoopingCallBase.
The poll-function passed to LoopingCallBase can raise this exception to
break out of the loop normally. This is somewhat analogous to
StopIteration.
An optional return-value can be included as the argument to the exception;
this return-value will be returned by LoopingCallBase.wait()
"""
def __init__(self, retvalue=True):
""":param retvalue: Value that LoopingCallBase.wait() should return."""
self.retvalue = retvalue
class LoopingCallBase(object):
def __init__(self, f=None, *args, **kw):
self.args = args
self.kw = kw
self.f = f
self._running = False
self.done = None
def stop(self):
self._running = False
def wait(self):
return self.done.wait()
class FixedIntervalLoopingCall(LoopingCallBase):
"""A fixed interval looping call."""
def start(self, interval, initial_delay=None):
self._running = True
done = event.Event()
def _inner():
if initial_delay:
greenthread.sleep(initial_delay)
try:
while self._running:
start = _ts()
self.f(*self.args, **self.kw)
end = _ts()
if not self._running:
break
delay = end - start - interval
if delay > 0:
LOG.warn(_LW('task %(func_name)r run outlasted '
'interval by %(delay).2f sec'),
{'func_name': self.f, 'delay': delay})
greenthread.sleep(-delay if delay < 0 else 0)
except LoopingCallDone as e:
self.stop()
done.send(e.retvalue)
except Exception:
LOG.exception(_LE('in fixed duration looping call'))
done.send_exception(*sys.exc_info())
return
else:
done.send(True)
self.done = done
greenthread.spawn_n(_inner)
return self.done
class DynamicLoopingCall(LoopingCallBase):
"""A looping call which sleeps until the next known event.
The function called should return how long to sleep for before being
called again.
"""
def start(self, initial_delay=None, periodic_interval_max=None):
self._running = True
done = event.Event()
def _inner():
if initial_delay:
greenthread.sleep(initial_delay)
try:
while self._running:
idle = self.f(*self.args, **self.kw)
if not self._running:
break
if periodic_interval_max is not None:
idle = min(idle, periodic_interval_max)
LOG.debug('Dynamic looping call %(func_name)r sleeping '
'for %(idle).02f seconds',
{'func_name': self.f, 'idle': idle})
greenthread.sleep(idle)
except LoopingCallDone as e:
self.stop()
done.send(e.retvalue)
except Exception:
LOG.exception(_LE('in dynamic looping call'))
done.send_exception(*sys.exc_info())
return
else:
done.send(True)
self.done = done
greenthread.spawn(_inner)
return self.done
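A sketch of the DynamicLoopingCall contract described above: the callback returns how long to sleep before its next run. oslo_service.loopingcall, which replaces this module, keeps the same interface; the task below is illustrative.

from oslo_service import loopingcall

state = {'runs': 0}

def _task():
    state['runs'] += 1
    if state['runs'] >= 3:
        raise loopingcall.LoopingCallDone()
    return 0.1 * state['runs']  # sleep 0.1s, then 0.2s, before the next run

timer = loopingcall.DynamicLoopingCall(_task)
timer.start(periodic_interval_max=1.0).wait()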

@@ -1,495 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic Node base class for all workers that run on hosts."""
import errno
import logging
import os
import random
import signal
import sys
import time
try:
# Importing just the symbol here because the io module does not
# exist in Python 2.6.
from io import UnsupportedOperation # noqa
except ImportError:
# Python 2.6
UnsupportedOperation = None
import eventlet
from eventlet import event
from oslo_config import cfg
from daisy.openstack.common import eventlet_backdoor
from daisy.openstack.common._i18n import _LE, _LI, _LW
from daisy.openstack.common import systemd
from daisy.openstack.common import threadgroup
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def _sighup_supported():
return hasattr(signal, 'SIGHUP')
def _is_daemon():
# The process group for a foreground process will match the
# process group of the controlling terminal. If those values do
# not match, or ioctl() fails on the stdout file handle, we assume
# the process is running in the background as a daemon.
# http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics
try:
is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno())
except OSError as err:
if err.errno == errno.ENOTTY:
# Assume we are a daemon because there is no terminal.
is_daemon = True
else:
raise
except UnsupportedOperation:
# Could not get the fileno for stdout, so we must be a daemon.
is_daemon = True
return is_daemon
def _is_sighup_and_daemon(signo):
if not (_sighup_supported() and signo == signal.SIGHUP):
# Avoid checking if we are a daemon, because the signal isn't
# SIGHUP.
return False
return _is_daemon()
def _signo_to_signame(signo):
signals = {signal.SIGTERM: 'SIGTERM',
signal.SIGINT: 'SIGINT'}
if _sighup_supported():
signals[signal.SIGHUP] = 'SIGHUP'
return signals[signo]
def _set_signals_handler(handler):
signal.signal(signal.SIGTERM, handler)
signal.signal(signal.SIGINT, handler)
if _sighup_supported():
signal.signal(signal.SIGHUP, handler)
class Launcher(object):
"""Launch one or more services and wait for them to complete."""
def __init__(self):
"""Initialize the service launcher.
:returns: None
"""
self.services = Services()
self.backdoor_port = eventlet_backdoor.initialize_if_enabled()
def launch_service(self, service):
"""Load and start the given service.
:param service: The service you would like to start.
:returns: None
"""
service.backdoor_port = self.backdoor_port
self.services.add(service)
def stop(self):
"""Stop all services which are currently running.
:returns: None
"""
self.services.stop()
def wait(self):
"""Waits until all services have been stopped, and then returns.
:returns: None
"""
self.services.wait()
def restart(self):
"""Reload config files and restart service.
:returns: None
"""
cfg.CONF.reload_config_files()
self.services.restart()
class SignalExit(SystemExit):
def __init__(self, signo, exccode=1):
super(SignalExit, self).__init__(exccode)
self.signo = signo
class ServiceLauncher(Launcher):
def _handle_signal(self, signo, frame):
# Allow the process to be killed again and die from natural causes
_set_signals_handler(signal.SIG_DFL)
raise SignalExit(signo)
def handle_signal(self):
_set_signals_handler(self._handle_signal)
def _wait_for_exit_or_signal(self, ready_callback=None):
status = None
signo = 0
LOG.debug('Full set of CONF:')
CONF.log_opt_values(LOG, logging.DEBUG)
try:
if ready_callback:
ready_callback()
super(ServiceLauncher, self).wait()
except SignalExit as exc:
signame = _signo_to_signame(exc.signo)
LOG.info(_LI('Caught %s, exiting'), signame)
status = exc.code
signo = exc.signo
except SystemExit as exc:
status = exc.code
finally:
self.stop()
return status, signo
def wait(self, ready_callback=None):
systemd.notify_once()
while True:
self.handle_signal()
status, signo = self._wait_for_exit_or_signal(ready_callback)
if not _is_sighup_and_daemon(signo):
return status
self.restart()
class ServiceWrapper(object):
def __init__(self, service, workers):
self.service = service
self.workers = workers
self.children = set()
self.forktimes = []
class ProcessLauncher(object):
def __init__(self):
"""Constructor."""
self.children = {}
self.sigcaught = None
self.running = True
rfd, self.writepipe = os.pipe()
self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')
self.handle_signal()
def handle_signal(self):
_set_signals_handler(self._handle_signal)
def _handle_signal(self, signo, frame):
self.sigcaught = signo
self.running = False
# Allow the process to be killed again and die from natural causes
_set_signals_handler(signal.SIG_DFL)
def _pipe_watcher(self):
# This will block until the write end is closed when the parent
# dies unexpectedly
self.readpipe.read()
LOG.info(_LI('Parent process has died unexpectedly, exiting'))
sys.exit(1)
def _child_process_handle_signal(self):
# Setup child signal handlers differently
def _sigterm(*args):
signal.signal(signal.SIGTERM, signal.SIG_DFL)
raise SignalExit(signal.SIGTERM)
def _sighup(*args):
signal.signal(signal.SIGHUP, signal.SIG_DFL)
raise SignalExit(signal.SIGHUP)
signal.signal(signal.SIGTERM, _sigterm)
if _sighup_supported():
signal.signal(signal.SIGHUP, _sighup)
# Block SIGINT and let the parent send us a SIGTERM
signal.signal(signal.SIGINT, signal.SIG_IGN)
def _child_wait_for_exit_or_signal(self, launcher):
status = 0
signo = 0
# NOTE(johannes): All exceptions are caught to ensure this
# doesn't fallback into the loop spawning children. It would
# be bad for a child to spawn more children.
try:
launcher.wait()
except SignalExit as exc:
signame = _signo_to_signame(exc.signo)
LOG.info(_LI('Child caught %s, exiting'), signame)
status = exc.code
signo = exc.signo
except SystemExit as exc:
status = exc.code
except BaseException:
LOG.exception(_LE('Unhandled exception'))
status = 2
finally:
launcher.stop()
return status, signo
def _child_process(self, service):
self._child_process_handle_signal()
# Reopen the eventlet hub to make sure we don't share an epoll
# fd with parent and/or siblings, which would be bad
eventlet.hubs.use_hub()
# Close write to ensure only parent has it open
os.close(self.writepipe)
# Create greenthread to watch for parent to close pipe
eventlet.spawn_n(self._pipe_watcher)
# Reseed random number generator
random.seed()
launcher = Launcher()
launcher.launch_service(service)
return launcher
def _start_child(self, wrap):
if len(wrap.forktimes) > wrap.workers:
# Limit ourselves to one process a second (over the period of
# number of workers * 1 second). This will allow workers to
# start up quickly but ensure we don't fork off children that
# die instantly too quickly.
if time.time() - wrap.forktimes[0] < wrap.workers:
LOG.info(_LI('Forking too fast, sleeping'))
time.sleep(1)
wrap.forktimes.pop(0)
wrap.forktimes.append(time.time())
pid = os.fork()
if pid == 0:
launcher = self._child_process(wrap.service)
while True:
self._child_process_handle_signal()
status, signo = self._child_wait_for_exit_or_signal(launcher)
if not _is_sighup_and_daemon(signo):
break
launcher.restart()
os._exit(status)
LOG.info(_LI('Started child %d'), pid)
wrap.children.add(pid)
self.children[pid] = wrap
return pid
def launch_service(self, service, workers=1):
wrap = ServiceWrapper(service, workers)
LOG.info(_LI('Starting %d workers'), wrap.workers)
while self.running and len(wrap.children) < wrap.workers:
self._start_child(wrap)
def _wait_child(self):
try:
# Block while any of child processes have exited
pid, status = os.waitpid(0, 0)
if not pid:
return None
except OSError as exc:
if exc.errno not in (errno.EINTR, errno.ECHILD):
raise
return None
if os.WIFSIGNALED(status):
sig = os.WTERMSIG(status)
LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'),
dict(pid=pid, sig=sig))
else:
code = os.WEXITSTATUS(status)
LOG.info(_LI('Child %(pid)s exited with status %(code)d'),
dict(pid=pid, code=code))
if pid not in self.children:
LOG.warning(_LW('pid %d not in child list'), pid)
return None
wrap = self.children.pop(pid)
wrap.children.remove(pid)
return wrap
def _respawn_children(self):
while self.running:
wrap = self._wait_child()
if not wrap:
continue
while self.running and len(wrap.children) < wrap.workers:
self._start_child(wrap)
def wait(self):
"""Loop waiting on children to die and respawning as necessary."""
systemd.notify_once()
LOG.debug('Full set of CONF:')
CONF.log_opt_values(LOG, logging.DEBUG)
try:
while True:
self.handle_signal()
self._respawn_children()
# No signal means that stop was called. Don't clean up here.
if not self.sigcaught:
return
signame = _signo_to_signame(self.sigcaught)
LOG.info(_LI('Caught %s, stopping children'), signame)
if not _is_sighup_and_daemon(self.sigcaught):
break
for pid in self.children:
os.kill(pid, signal.SIGHUP)
self.running = True
self.sigcaught = None
except eventlet.greenlet.GreenletExit:
LOG.info(_LI("Wait called after thread killed. Cleaning up."))
self.stop()
def stop(self):
"""Terminate child processes and wait on each."""
self.running = False
for pid in self.children:
try:
os.kill(pid, signal.SIGTERM)
except OSError as exc:
if exc.errno != errno.ESRCH:
raise
# Wait for children to die
if self.children:
LOG.info(_LI('Waiting on %d children to exit'), len(self.children))
while self.children:
self._wait_child()
class Service(object):
"""Service object for binaries running on hosts."""
def __init__(self, threads=1000):
self.tg = threadgroup.ThreadGroup(threads)
# signal that the service is done shutting itself down:
self._done = event.Event()
def reset(self):
# NOTE(Fengqian): docs for Event.reset() recommend against using it
self._done = event.Event()
def start(self):
pass
def stop(self, graceful=False):
self.tg.stop(graceful)
self.tg.wait()
# Signal that service cleanup is done:
if not self._done.ready():
self._done.send()
def wait(self):
self._done.wait()
class Services(object):
def __init__(self):
self.services = []
self.tg = threadgroup.ThreadGroup()
self.done = event.Event()
def add(self, service):
self.services.append(service)
self.tg.add_thread(self.run_service, service, self.done)
def stop(self):
# wait for graceful shutdown of services:
for service in self.services:
service.stop()
service.wait()
# Each service has performed cleanup, now signal that the run_service
# wrapper threads can now die:
if not self.done.ready():
self.done.send()
# reap threads:
self.tg.stop()
def wait(self):
self.tg.wait()
def restart(self):
self.stop()
self.done = event.Event()
for restart_service in self.services:
restart_service.reset()
self.tg.add_thread(self.run_service, restart_service, self.done)
@staticmethod
def run_service(service, done):
"""Service start wrapper.
:param service: service to run
:param done: event to wait on until a shutdown is triggered
:returns: None
"""
service.start()
done.wait()
def launch(service, workers=1):
if workers is None or workers == 1:
launcher = ServiceLauncher()
launcher.launch_service(service)
else:
launcher = ProcessLauncher()
launcher.launch_service(service, workers=workers)
return launcher
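A sketch of how a worker binary used this module, written as if appended to the file above (HeartbeatService is illustrative; workers > 1 selects ProcessLauncher, which forks and respawns child processes).

class HeartbeatService(Service):
    def start(self):
        self.tg.add_timer(10, lambda: LOG.info('heartbeat'))

launcher = launch(HeartbeatService(), workers=2)
launcher.wait()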

@@ -1,105 +0,0 @@
# Copyright 2012-2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper module for systemd service readiness notification.
"""
import logging
import os
import socket
import sys
LOG = logging.getLogger(__name__)
def _abstractify(socket_name):
if socket_name.startswith('@'):
# abstract namespace socket
socket_name = '\0%s' % socket_name[1:]
return socket_name
def _sd_notify(unset_env, msg):
notify_socket = os.getenv('NOTIFY_SOCKET')
if notify_socket:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
try:
sock.connect(_abstractify(notify_socket))
sock.sendall(msg)
if unset_env:
del os.environ['NOTIFY_SOCKET']
except EnvironmentError:
LOG.debug("Systemd notification failed", exc_info=True)
finally:
sock.close()
def notify():
"""Send notification to Systemd that service is ready.
For details see
http://www.freedesktop.org/software/systemd/man/sd_notify.html
"""
_sd_notify(False, 'READY=1')
def notify_once():
"""Send notification once to Systemd that service is ready.
Systemd sets NOTIFY_SOCKET environment variable with the name of the
socket listening for notifications from services.
This method removes the NOTIFY_SOCKET environment variable to ensure
notification is sent only once.
"""
_sd_notify(True, 'READY=1')
def onready(notify_socket, timeout):
"""Wait for systemd style notification on the socket.
:param notify_socket: local socket address
:type notify_socket: string
:param timeout: socket timeout
:type timeout: float
:returns: 0 service ready
1 service not ready
2 timeout occurred
"""
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.settimeout(timeout)
sock.bind(_abstractify(notify_socket))
try:
msg = sock.recv(512)
except socket.timeout:
return 2
finally:
sock.close()
if 'READY=1' in msg:
return 0
else:
return 1
if __name__ == '__main__':
# simple CLI for testing
if len(sys.argv) == 1:
notify()
elif len(sys.argv) >= 2:
timeout = float(sys.argv[1])
notify_socket = os.getenv('NOTIFY_SOCKET')
if notify_socket:
retval = onready(notify_socket, timeout)
sys.exit(retval)
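A sketch pairing notify_once() with a Type=notify unit; the unit file and binary path are illustrative, and oslo_service.systemd (which replaces this module) exposes the same function.

# [Service]
# Type=notify
# ExecStart=/usr/bin/daisy-api
#
# In the service's main(), once initialization is complete:
from oslo_service import systemd

systemd.notify_once()  # sends READY=1 over $NOTIFY_SOCKET, then unsets the variable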

@@ -1,149 +0,0 @@
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import threading
import eventlet
from eventlet import greenpool
from daisy.openstack.common import loopingcall
LOG = logging.getLogger(__name__)
def _thread_done(gt, *args, **kwargs):
"""Callback function to be passed to GreenThread.link() when we spawn()
Calls the :class:`ThreadGroup` to notify it.
"""
kwargs['group'].thread_done(kwargs['thread'])
class Thread(object):
"""Wrapper around a greenthread, that holds a reference to the
:class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup` when
it has done so it can be removed from the threads list.
"""
def __init__(self, thread, group):
self.thread = thread
self.thread.link(_thread_done, group=group, thread=self)
def stop(self):
self.thread.kill()
def wait(self):
return self.thread.wait()
def link(self, func, *args, **kwargs):
self.thread.link(func, *args, **kwargs)
class ThreadGroup(object):
"""The point of the ThreadGroup class is to:
* keep track of timers and greenthreads (making it easier to stop them
when need be).
* provide an easy API to add timers.
"""
def __init__(self, thread_pool_size=10):
self.pool = greenpool.GreenPool(thread_pool_size)
self.threads = []
self.timers = []
def add_dynamic_timer(self, callback, initial_delay=None,
periodic_interval_max=None, *args, **kwargs):
timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs)
timer.start(initial_delay=initial_delay,
periodic_interval_max=periodic_interval_max)
self.timers.append(timer)
def add_timer(self, interval, callback, initial_delay=None,
*args, **kwargs):
pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs)
pulse.start(interval=interval,
initial_delay=initial_delay)
self.timers.append(pulse)
def add_thread(self, callback, *args, **kwargs):
gt = self.pool.spawn(callback, *args, **kwargs)
th = Thread(gt, self)
self.threads.append(th)
return th
def thread_done(self, thread):
self.threads.remove(thread)
def _stop_threads(self):
current = threading.current_thread()
# Iterate over a copy of self.threads so thread_done doesn't
# modify the list while we're iterating
for x in self.threads[:]:
if x is current:
# don't kill the current thread.
continue
try:
x.stop()
except eventlet.greenlet.GreenletExit:
pass
except Exception as ex:
LOG.exception(ex)
def stop_timers(self):
for x in self.timers:
try:
x.stop()
except Exception as ex:
LOG.exception(ex)
self.timers = []
def stop(self, graceful=False):
"""stop function has the option of graceful=True/False.
* In case of graceful=True, wait for all threads to be finished.
Never kill threads.
* In case of graceful=False, kill threads immediately.
"""
self.stop_timers()
if graceful:
# In case of graceful=True, wait for all threads to be
# finished, never kill threads
self.wait()
else:
# In case of graceful=False(Default), kill threads
# immediately
self._stop_threads()
def wait(self):
for x in self.timers:
try:
x.wait()
except eventlet.greenlet.GreenletExit:
pass
except Exception as ex:
LOG.exception(ex)
current = threading.current_thread()
# Iterate over a copy of self.threads so thread_done doesn't
# modify the list while we're iterating
for x in self.threads[:]:
if x is current:
continue
try:
x.wait()
except eventlet.greenlet.GreenletExit:
pass
except Exception as ex:
LOG.exception(ex)
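A short sketch of typical ThreadGroup use (intervals are illustrative; oslo_service.threadgroup, which replaces this module, has the same interface). eventlet.sleep() is used so the green threads actually get scheduled.

import eventlet

from oslo_service import threadgroup

def beat():
    print('heartbeat')

tg = threadgroup.ThreadGroup(thread_pool_size=10)
tg.add_timer(0.5, beat)  # a FixedIntervalLoopingCall under the hood
tg.add_thread(beat)      # a one-shot green thread from the pool
eventlet.sleep(2)
tg.stop(graceful=True)   # stop timers, wait for threads instead of killing them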

@@ -1,8 +0,0 @@
[DEFAULT]
# The list of modules to copy from oslo-incubator
module=install_venv_common
module=service
# The base module to hold the copy of openstack.common
base=daisy

@@ -1,30 +0,0 @@
Rally job related files
=======================
This directory contains rally tasks and plugins that are run by OpenStack CI.
Structure
---------
* plugins - directory where you can add rally plugins. Almost everything in
Rally is a plugin. Benchmark context, Benchmark scenario, SLA checks, Generic
cleanup resources, ....
* extra - all files from this directory will be copy pasted to gates, so you
are able to use absolute paths in rally tasks.
Files will be located in ~/.rally/extra/*
* glance.yaml is a task that is run in gates against OpenStack (nova network)
deployed by DevStack
Useful links
------------
* More about Rally: https://rally.readthedocs.org/en/latest/
* How to add rally-gates: https://rally.readthedocs.org/en/latest/rally_gatejob.html
* About plugins: https://rally.readthedocs.org/en/latest/plugins.html
* Plugin samples: https://github.com/stackforge/rally/tree/master/doc/samples/plugins

@@ -1,5 +0,0 @@
Extra files
===========
All files from this directory will be copy pasted to gates, so you are able to
use absolute path in rally tasks. Files will be in ~/.rally/extra/*

@@ -1,45 +0,0 @@
---
GlanceImages.create_and_list_image:
-
args:
image_location: "http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-disk.img"
container_format: "bare"
disk_format: "qcow2"
runner:
type: "constant"
times: 20
concurrency: 5
context:
users:
tenants: 1
users_per_tenant: 1
GlanceImages.create_and_delete_image:
-
args:
image_location: "http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-disk.img"
container_format: "bare"
disk_format: "qcow2"
runner:
type: "constant"
times: 20
concurrency: 5
context:
users:
tenants: 5
users_per_tenant: 2
GlancePlugin.create_and_list:
-
args:
image_location: "~/.rally/extra/fake.img"
container_format: "bare"
disk_format: "qcow2"
runner:
type: "constant"
times: 700
concurrency: 7
context:
users:
tenants: 1
users_per_tenant: 1

@@ -1,9 +0,0 @@
Rally plugins
=============
All *.py modules from this directory will be auto-loaded by Rally and all
plugins will be discoverable. There is no need of any extra configuration
and there is no difference between writing them here and in rally code base.
Note that it is better to push all interesting and useful benchmarks to Rally
code base, this simplifies administration for Operators.

@@ -1,91 +0,0 @@
# Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Sample of plugin for Glance.
For more Glance related benchmarks take a look here:
github.com/stackforge/rally/blob/master/rally/benchmark/scenarios/glance/
About plugins: https://rally.readthedocs.org/en/latest/plugins.html
Rally concepts https://wiki.openstack.org/wiki/Rally/Concepts
"""
import os
from rally.benchmark.scenarios import base
from rally.benchmark import utils as bench_utils
class GlancePlugin(base.Scenario):
@base.atomic_action_timer("glance.create_image_label")
def _create_image(self, image_name, container_format,
image_location, disk_format, **kwargs):
"""Create a new image.
:param image_name: String used to name the image
:param container_format: Container format of image.
Acceptable formats: ami, ari, aki, bare, and ovf.
:param image_location: image file location used to upload
:param disk_format: Disk format of image. Acceptable formats:
ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso.
:param **kwargs: optional parameters to create image
returns: object of image
"""
kw = {
"name": image_name,
"container_format": container_format,
"disk_format": disk_format,
}
kw.update(kwargs)
try:
if os.path.isfile(os.path.expanduser(image_location)):
kw["data"] = open(os.path.expanduser(image_location))
else:
kw["copy_from"] = image_location
image = self.clients("glance").images.create(**kw)
image = bench_utils.wait_for(
image,
is_ready=bench_utils.resource_is("active"),
update_resource=bench_utils.get_from_manager(),
timeout=100,
check_interval=0.5)
finally:
if "data" in kw:
kw["data"].close()
return image
@base.atomic_action_timer("glance.list_images_label")
def _list_images(self):
return list(self.clients("glance").images.list())
@base.scenario(context={"cleanup": ["glance"]})
def create_and_list(self, container_format,
image_location, disk_format, **kwargs):
self._create_image(self._generate_random_name(),
container_format,
image_location,
disk_format,
**kwargs)
self._list_images()

@@ -2,7 +2,8 @@
 # of appearance. Changing the order has an impact on the overall integration
 # process, which may cause wedges in the gate later.
-pbr>=0.6,!=0.7,<1.0
+pbr>=1.6 # Apache-2.0
 #
 # The greenlet package must be compiled with gcc and needs
 # the Python.h headers. Make sure you install the python-dev
@@ -30,14 +31,6 @@ stevedore>=1.3.0,<1.4.0 # Apache-2.0
 taskflow>=0.7.1,<0.8.0
 keystonemiddleware>=1.5.0,<1.6.0
 WSME>=0.6
-# For openstack/common/lockutils
-posix_ipc
-# For Swift storage backend.
-python-swiftclient>=2.2.0,<2.5.0
-# For VMware storage backed.
-oslo_vmware>=0.11.1,<0.12.0 # Apache-2.0
 # For paste.util.template used in keystone.common.template
 Paste
@@ -54,12 +47,10 @@ oslo_log>=1.0.0,<1.1.0 # Apache-2.0
 oslo_messaging>=1.8.0,<1.9.0 # Apache-2.0
 oslo_policy>=0.3.1,<0.4.0 # Apache-2.0
 oslo_serialization>=1.4.0,<1.5.0 # Apache-2.0
+oslo.service>=0.1.0 # Apache-2.0
 retrying>=1.2.3,!=1.3.0 # Apache-2.0
 osprofiler>=0.3.0 # Apache-2.0
-# Glance Store
-glance_store>=0.3.0,<0.5.0 # Apache-2.0
 # Artifact repository
 semantic_version>=2.3.1