Pushing initial work
Change-Id: I4a63e06985dafc15fcc50f325e49fe811d5a84b9
This commit is contained in:
parent
4e7b1b12aa
commit
da94b9f21e
@@ -0,0 +1,3 @@
.testrepository
.tox
cloudkitty.egg-info
@@ -0,0 +1,4 @@
[DEFAULT]
test_command=${PYTHON:-python} -m subunit.run discover -t ./ ./tests $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list
@@ -0,0 +1,6 @@
# CloudKitty: An OpenStack Billing and Usage Reporter #

## Goal ##

The goal of this project is to automate the extraction of metrics from
ceilometer, map them to billing information and generate reports.
@@ -0,0 +1,36 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: backend/base.py
Author: Stephane Albert
Email: stephane.albert@objectif-libre.com
Github: http://github.com/objectiflibre
Description: CloudKitty, Base backend (Abstract)
"""


class BaseIOBackend(object):
    def __init__(self, path):
        self.open(path)

    def open(self, path):
        raise NotImplementedError

    def tell(self):
        raise NotImplementedError

    def seek(self, offset, from_what=0):
        # 0 beg, 1 cur, 2 end
        raise NotImplementedError

    def flush(self):
        raise NotImplementedError

    def write(self, data):
        raise NotImplementedError

    def read(self):
        raise NotImplementedError

    def close(self):
        raise NotImplementedError
@@ -0,0 +1,18 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: backend/file.py
Author: Stephane Albert
Email: stephane.albert@objectif-libre.com
Github: http://github.com/objectiflibre
Description: CloudKitty, Simple file backend
"""


class FileBackend(file):
    def __init__(self, path, mode='ab+'):
        try:
            super(FileBackend, self).__init__(path, mode)
        except IOError:
            # File not found
            super(FileBackend, self).__init__(path, 'wb+')
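Since FileBackend subclasses the Python 2 built-in file type, it behaves as a regular file object that transparently creates missing files. A minimal usage sketch (the path and contents are hypothetical):

    backend = FileBackend('/var/lib/cloudkitty/states/osrf_demo.state')
    backend.write('{"timestamp": 1400000000, "metadata": {}}')
    backend.close()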
@@ -0,0 +1,17 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: billing/base.py
Author: Stephane Albert
Email: stephane.albert@objectif-libre.com
Github: http://github.com/objectiflibre
Description: CloudKitty, Billing processor base class.
"""


class BaseBillingProcessor(object):
    def __init__(self):
        raise NotImplementedError()

    def process(self, data):
        raise NotImplementedError()
@@ -0,0 +1,57 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: billing/hash.py
Author: Stephane Albert
Email: stephane.albert@objectif-libre.com
Github: http://github.com/objectiflibre
Description: CloudKitty, HashMap Billing processor.
"""
import json

from cloudkitty.billing.base import BaseBillingProcessor


class BasicHashMap(BaseBillingProcessor):
    def __init__(self):
        self._billing_info = {}
        self._load_billing_rates()

    def _load_billing_rates(self):
        # FIXME We should use another path
        self._billing_info = json.loads(open('billing_info.json').read())

    def process_service(self, name, data):
        if name not in self._billing_info:
            return
        serv_b_info = self._billing_info[name]
        for entry in data:
            flat = 0
            rate = 1
            entry_desc = entry['desc']
            for field in serv_b_info:
                if field not in entry_desc:
                    continue
                b_info = serv_b_info[field]
                if b_info['type'] == 'rate':
                    if entry_desc[field] in b_info['map']:
                        rate *= b_info['map'][entry_desc[field]]
                    elif 'default' in b_info['map']:
                        rate *= b_info['map']['default']
                elif b_info['type'] == 'flat':
                    new_flat = 0
                    if entry_desc[field] in b_info['map']:
                        new_flat = b_info['map'][entry_desc[field]]
                    elif 'default' in b_info['map']:
                        new_flat = b_info['map']['default']
                    if new_flat > flat:
                        flat = new_flat
            billing_info = {'price': flat * rate}
            entry['billing'] = billing_info

    def process(self, data):
        for cur_data in data:
            cur_usage = cur_data['usage']
            for service in cur_usage:
                self.process_service(service, cur_usage[service])
        return data
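The schema of billing_info.json is only implied by the lookups above: a service name maps to entry-description fields, each carrying a type ('flat' or 'rate') and a value map with an optional 'default'. A hypothetical example:

    {
        "compute": {
            "flavor": {"type": "flat", "map": {"m1.small": 0.02, "default": 0.01}},
            "availability_zone": {"type": "rate", "map": {"az1": 1.5, "default": 1.0}}
        }
    }

With this map, an entry whose 'desc' is {'flavor': 'm1.small', 'availability_zone': 'az1'} gets a price of 0.02 * 1.5 = 0.03.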
@@ -0,0 +1,24 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: billing/noop.py
Author: Stephane Albert
Email: stephane.albert@objectif-libre.com
Github: http://github.com/objectiflibre
Description: CloudKitty, Dummy NOOP Billing Processor
"""
from cloudkitty.billing.base import BaseBillingProcessor


class Noop(BaseBillingProcessor):
    def __init__(self):
        pass

    def process(self, data):
        for cur_data in data:
            cur_usage = cur_data['usage']
            for service in cur_usage:
                for entry in cur_usage[service]:
                    if 'billing' not in entry:
                        entry['billing'] = {}
        return data
@@ -0,0 +1,55 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: base.py
Author: Stephane Albert
Email: stephane.albert@objectif-libre.com
Github: http://github.com/objectiflibre
Description: CloudKitty, Base Collector
"""
from datetime import datetime, timedelta

import cloudkitty.utils as utils


class BaseCollector(object):
    def __init__(self, **kwargs):
        try:
            self.user = kwargs['user']
            self.password = kwargs['password']
            self.tenant = kwargs['tenant']
            self.region = kwargs['region']
            self.keystone_url = kwargs['keystone_url']
            self.period = kwargs['period']
        except KeyError as e:  # a missing kwarg raises KeyError
            raise ValueError("Missing argument (%s)" % e)

        self._conn = None
        self._connect()

    def _connect(self):
        raise NotImplementedError()

    @staticmethod
    def last_month():
        now = datetime.now()
        month_end = datetime(now.year, now.month, 1) - timedelta(days=1)
        month_start = month_end.replace(day=1)
        start_ts = utils.dt2ts(month_start)
        end_ts = utils.dt2ts(month_end)
        return start_ts, end_ts

    @staticmethod
    def current_month():
        now = datetime.now()
        month_start = datetime(now.year, now.month, 1)
        return utils.dt2ts(month_start)

    def retrieve(self, resource, start, end=None, project_id=None,
                 q_filter=None):
        trans_resource = 'get_'
        trans_resource += resource.replace('.', '_')
        if not hasattr(self, trans_resource):
            return None
        func = getattr(self, trans_resource)
        return func(start, end, project_id, q_filter)
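A collector subclass only needs to expose get_<service> methods; retrieve() does the name translation. For example:

    collector.retrieve('compute', start_ts, end_ts)
    # is equivalent to:
    collector.get_compute(start_ts, end_ts, None, None)

(start_ts and end_ts stand for Unix timestamps here.)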
@@ -0,0 +1,139 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: ceilometer.py
Author: Stephane Albert
Email: stephane.albert@objectif-libre.com
Github: http://github.com/objectiflibre
Description: CloudKitty, Ceilometer Collector
"""
from datetime import datetime

from ceilometerclient import client as cclient

from cloudkitty.collector.base import BaseCollector


class CeilometerCollector(BaseCollector):
    def __init__(self, **kwargs):
        super(CeilometerCollector, self).__init__(**kwargs)

        self._resource_cache = {}

    def _connect(self):
        """
        Initialize connection to the Ceilometer endpoint.
        """
        self._conn = cclient.get_client('2', os_username=self.user,
                                        os_password=self.password,
                                        os_auth_url=self.keystone_url,
                                        os_tenant_name=self.tenant,
                                        os_region_name=self.region)

    def gen_filter(self, op='eq', **kwargs):
        """
        Generate a ceilometer filter from kwargs.
        """
        q_filter = []
        for kwarg in kwargs:
            q_filter.append({'field': kwarg, 'op': op, 'value': kwargs[kwarg]})
        return q_filter

    def prepend_filter(self, prepend, **kwargs):
        """
        Prepend the dict keys with the prepend value, useful to compose filters.
        """
        q_filter = {}
        for kwarg in kwargs:
            q_filter[prepend + kwarg] = kwargs[kwarg]
        return q_filter

    def user_metadata_filter(self, op='eq', **kwargs):
        """
        Create a user_metadata filter from kwargs.
        """
        user_filter = {}
        for kwarg in kwargs:
            field = kwarg
            # Auto replace of . to _ to match ceilometer behaviour
            if '.' in field:
                field = field.replace('.', '_')
            user_filter[field] = kwargs[kwarg]
        user_filter = self.prepend_filter('user_metadata.', **user_filter)
        return self.metadata_filter(op, **user_filter)

    def metadata_filter(self, op='eq', **kwargs):
        """
        Create a metadata filter from kwargs.
        """
        meta_filter = self.prepend_filter('metadata.', **kwargs)
        return self.gen_filter(op, **meta_filter)

    def get_active_instances(self, start, end=None, project_id=None,
                             q_filter=None):
        """
        Return the ids of the instances that were active during the
        timespan.
        """
        start_iso = datetime.fromtimestamp(start).isoformat()
        req_filter = self.gen_filter(op='ge', timestamp=start_iso)
        if project_id:
            req_filter.extend(self.gen_filter(project=project_id))
        if end:
            end_iso = datetime.fromtimestamp(end).isoformat()
            req_filter.extend(self.gen_filter(op='le', timestamp=end_iso))
        if isinstance(q_filter, list):
            req_filter.extend(q_filter)
        elif q_filter:
            req_filter.append(q_filter)
        instance_stats = self._conn.statistics.list(meter_name='instance',
                                                    period=0, q=req_filter,
                                                    groupby=['resource_id'])
        return [instance.groupby['resource_id'] for instance in instance_stats]

    def get_compute(self, start, end=None, project_id=None, q_filter=None):
        active_instances = self.get_active_instances(start, end, project_id,
                                                     q_filter)
        compute_data = []
        volume_data = {'unit': 'instance', 'qty': 1}
        for instance in active_instances:
            instance_data = {}
            instance_data['desc'] = self.get_resource_detail(instance)
            instance_data['desc']['instance_id'] = instance
            instance_data['vol'] = volume_data
            compute_data.append(instance_data)

        data = {}
        data['compute'] = compute_data
        return data

    def _strip_compute(self, data):
        res_data = {}
        res_data['name'] = data.metadata.get('display_name')
        res_data['flavor'] = data.metadata.get('flavor.name')
        res_data['vcpus'] = data.metadata.get('vcpus')
        res_data['memory'] = data.metadata.get('memory_mb')
        res_data['image_id'] = data.metadata.get('image.id')
        res_data['availability_zone'] = \
            data.metadata.get('OS-EXT-AZ.availability_zone')

        res_data['project_id'] = data.project_id
        res_data['user_id'] = data.user_id

        res_data['metadata'] = {}
        for field in data.metadata:
            if field.startswith('user_metadata'):
                res_data['metadata'][field[14:]] = data.metadata[field]

        return res_data

    def strip_resource_data(self, res_data, res_type='compute'):
        if res_type == 'compute':
            return self._strip_compute(res_data)

    def get_resource_detail(self, resource_id):
        if resource_id not in self._resource_cache:
            resource = self._conn.resources.get(resource_id)
            resource = self.strip_resource_data(resource)
            self._resource_cache[resource_id] = resource
        return self._resource_cache[resource_id]
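For reference, gen_filter() builds the list-of-dicts query format that the ceilometerclient statistics API consumes:

    self.gen_filter(op='ge', timestamp='2014-05-01T00:00:00')
    # -> [{'field': 'timestamp', 'op': 'ge', 'value': '2014-05-01T00:00:00'}]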
@@ -0,0 +1,65 @@
from oslo.config import cfg


auth_opts = [
    cfg.StrOpt('username',
               default='',
               help='OpenStack username.'),
    cfg.StrOpt('password',
               default='',
               help='OpenStack password.'),
    cfg.StrOpt('tenant',
               default='',
               help='OpenStack tenant.'),
    cfg.StrOpt('region',
               default='',
               help='OpenStack region.'),
    cfg.StrOpt('url',
               default='',
               help='OpenStack auth URL.'), ]

collect_opts = [
    cfg.StrOpt('collector',
               default='cloudkitty.collector.ceilometer.CeilometerCollector',
               help='Data collector.'),
    cfg.IntOpt('window',
               default=1800,
               help='Number of samples to collect per call.'),
    cfg.IntOpt('period',
               default=3600,
               help='Billing period in seconds.'),
    cfg.ListOpt('services',
                default=['compute'],
                help='Services to monitor.'), ]

state_opts = [
    cfg.StrOpt('backend',
               default='cloudkitty.backend.file.FileBackend',
               help='Backend for the state manager.'),
    cfg.StrOpt('basepath',
               default='/var/lib/cloudkitty/states/',
               help='Storage directory for the file state backend.'), ]

billing_opts = [
    cfg.ListOpt('pipeline',
                default=['cloudkitty.billing.hash.BasicHashMap',
                         'cloudkitty.billing.noop.Noop'],
                help='Billing pipeline modules.'), ]

output_opts = [
    cfg.StrOpt('backend',
               default='cloudkitty.backend.file.FileBackend',
               help='Backend for the output manager.'),
    cfg.StrOpt('basepath',
               default='/var/lib/cloudkitty/states/',
               help='Storage directory for the file output backend.'),
    cfg.ListOpt('pipeline',
                default=['cloudkitty.writer.osrf.OSRFBackend'],
                help='Output pipeline'), ]


cfg.CONF.register_opts(auth_opts, 'auth')
cfg.CONF.register_opts(collect_opts, 'collect')
cfg.CONF.register_opts(state_opts, 'state')
cfg.CONF.register_opts(billing_opts, 'billing')
cfg.CONF.register_opts(output_opts, 'output')
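These option groups map one-to-one to sections of an oslo.config file; a minimal cloudkitty.conf might look like this (a sketch, all values are placeholders):

    [auth]
    username = cloudkitty
    password = secret
    tenant = service
    region = RegionOne
    url = http://127.0.0.1:5000/v2.0

    [collect]
    period = 3600
    services = compute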
@@ -0,0 +1,122 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: orchestrator.py
Author: Stephane Albert
Email: stephane.albert@objectif-libre.com
Github: http://github.com/objectiflibre
Description: CloudKitty, Orchestrator
"""
from datetime import datetime
import sys
import time

from keystoneclient.v2_0 import client as kclient
from oslo.config import cfg

import cloudkitty.utils as utils
import cloudkitty.config  # NOQA
from cloudkitty.state import StateManager
from cloudkitty.write_orchestrator import WriteOrchestrator


CONF = cfg.CONF


class Orchestrator(object):
    def __init__(self):
        # Billing settings
        self.billing_pipeline = []
        for billing_processor in CONF.billing.pipeline:
            self.billing_pipeline.append(utils.import_class(billing_processor))
        # Output settings
        self.output_pipeline = []
        for writer in CONF.output.pipeline:
            self.output_pipeline.append(utils.import_class(writer))

        self.keystone = kclient.Client(username=CONF.auth.username,
                                       password=CONF.auth.password,
                                       tenant_name=CONF.auth.tenant,
                                       region_name=CONF.auth.region,
                                       auth_url=CONF.auth.url)

        self.sm = StateManager(utils.import_class(CONF.state.backend),
                               CONF.state.basepath,
                               self.keystone.user_id,
                               'osrtf')

        collector = utils.import_class(CONF.collect.collector)
        self.collector = collector(user=CONF.auth.username,
                                   password=CONF.auth.password,
                                   tenant=CONF.auth.tenant,
                                   region=CONF.auth.region,
                                   keystone_url=CONF.auth.url,
                                   period=CONF.collect.period)

        self.wo = WriteOrchestrator(utils.import_class(CONF.output.backend),
                                    utils.import_class(CONF.state.backend),
                                    self.keystone.user_id,
                                    self.sm)

        for writer in self.output_pipeline:
            self.wo.add_writer(writer)

    def _check_state(self):
        def _get_this_month_timestamp():
            now = datetime.now()
            month_start = datetime(now.year, now.month, 1)
            timestamp = int(time.mktime(month_start.timetuple()))
            return timestamp

        timestamp = self.sm.get_state()
        if not timestamp:
            return _get_this_month_timestamp()

        now = int(time.time())
        if timestamp + CONF.collect.period < now:
            return timestamp
        return 0

    def _collect(self, service, start_timestamp):
        next_timestamp = start_timestamp + CONF.collect.period
        raw_data = self.collector.retrieve(service,
                                           start_timestamp,
                                           next_timestamp)

        timed_data = [{'period': {'begin': start_timestamp,
                                  'end': next_timestamp},
                       'usage': raw_data}]
        return timed_data

    def process(self):
        while True:
            timestamp = self._check_state()
            if not timestamp:
                print "Nothing left to do."
                break

            for service in CONF.collect.services:
                data = self._collect(service, timestamp)

                # Billing
                for b_proc in self.billing_pipeline:
                    b_obj = b_proc()
                    data = b_obj.process(data)

                # Writing
                self.wo.append(data)

            # We're getting a full period so we directly commit
            self.wo.commit()

        self.wo.close()


def main():
    CONF(sys.argv[1:], project='cloudkitty')
    orchestrator = Orchestrator()
    orchestrator.process()


if __name__ == "__main__":
    main()
@@ -0,0 +1,84 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: state.py
Author: Stephane Albert
Email: stephane.albert@objectif-libre.com
Github: http://github.com/objectiflibre
Description: CloudKitty, State tracking
"""
import json


class StateManager(object):
    def __init__(self, state_backend, state_basepath, user_id, report_type,
                 distributed=False):
        self._backend = state_backend
        self._basepath = state_basepath
        self._uid = user_id
        self._type = report_type
        self._distributed = distributed

        # States
        self._ts = None
        self._metadata = {}

    def _gen_filename(self):
        filename = '{}_{}.state'.format(self._type, self._uid)
        return filename

    def _open(self, mode='rb'):
        filename = self._gen_filename()
        state_file = self._backend(filename, mode)
        return state_file

    def _load(self):
        try:
            state_file = self._open()
            state_data = json.loads(state_file.read())
            self._ts = state_data['timestamp']
            self._metadata = state_data['metadata']
            state_file.close()
        except IOError:
            pass

    def _update(self):
        state_file = self._open('wb')
        state_data = {'timestamp': self._ts,
                      'metadata': self._metadata}
        state_file.write(json.dumps(state_data))
        state_file.close()

    def set_state(self, timestamp):
        """
        Set the current state's timestamp.
        """
        if self._distributed:
            self._load()
        self._ts = timestamp
        self._update()

    def get_state(self):
        """
        Get the state timestamp.
        """
        if self._distributed:
            self._load()
        return self._ts

    def set_metadata(self, metadata):
        """
        Set metadata attached to the state.
        """
        if self._distributed:
            self._load()
        self._metadata = metadata
        self._update()

    def get_metadata(self):
        """
        Get metadata attached to the state.
        """
        if self._distributed:
            self._load()
        return self._metadata
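On disk, a state is a single JSON object. For a report type of 'osrf' and a (hypothetical) user id 'demo', the file would be named 'osrf_demo.state' and, per _update() above, contain something like:

    {"timestamp": 1400000000, "metadata": {"total": 42.5}}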
@@ -0,0 +1,24 @@
# -*- coding: utf-8 -*-
import sys
import time

import iso8601


def dt2ts(orig_dt):
    return int(time.mktime(orig_dt.timetuple()))


def iso2dt(iso_date):
    return iso8601.parse_date(iso_date)


def import_class(import_str):
    mod_str, _sep, class_str = import_str.rpartition('.')
    if not mod_str:
        mod_str = '__builtin__'
    try:
        __import__(mod_str)
        return getattr(sys.modules[mod_str], class_str)
    except (ValueError, AttributeError):
        raise ImportError('Class %s cannot be found.' % class_str)
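import_class is what turns the dotted paths from the configuration into live classes; for example, resolving the default collector:

    collector_class = import_class('cloudkitty.collector.ceilometer.CeilometerCollector')

The orchestrator then instantiates the resolved class with the [auth] credentials.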
@@ -0,0 +1,197 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: write_orchestrator.py
Author: Stephane Albert
Email: stephane.albert@objectif-libre.com
Github: http://github.com/objectiflibre
Description: CloudKitty, Write Orchestrator used to handle writing pipeline
operations and temporary states storage.
"""
import json
from datetime import datetime
from zipfile import ZipFile

import cloudkitty.utils as utils


class OSRTFBackend(object):
    """
    Native backend for transient report storage.
    Used to store data from the output of the billing pipeline.
    """
    def __init__(self, backend):
        self._backend = backend
        self._osrtf = None

    def open(self, filename):
        self._osrtf = ZipFile(self._backend(filename, 'ab+'), 'a')

    def _gen_filename(self, timeframe):
        filename = '{}-{:02d}-{:02d}-{}-{}.json'.format(timeframe.year,
                                                        timeframe.month,
                                                        timeframe.day,
                                                        timeframe.hour,
                                                        timeframe.minute)
        return filename

    def _file_exists(self, filename):
        for file_info in self._osrtf.infolist():
            if file_info.filename == filename:
                return True
        return False

    def add(self, timeframe, data):
        """
        Add the data to the OpenStack Report Transient Format.
        """
        filename = self._gen_filename(timeframe)
        # We can only check for the existence of a file not rewrite or delete
        # it
        if not self._file_exists(filename):
            self._osrtf.writestr(filename, json.dumps(data))

    def get(self, timeframe):
        try:
            filename = self._gen_filename(timeframe)
            data = json.loads(self._osrtf.read(filename))
            return data
        except Exception:  # no entry stored for this timeframe
            pass


class WriteOrchestrator(object):
    """
    Write Orchestrator:
    Handle incoming data from the global orchestrator, and store them in an
    intermediary data format before final transformation.
    """
    def __init__(self, backend, state_backend, user_id, state_manager,
                 period=3600):
        self._backend = backend
        self._state_backend = state_backend
        self._uid = user_id
        self._period = period
        self._sm = state_manager
        self._osrtf = None
        self._write_pipeline = []

        # State vars
        self.usage_start = None
        self.usage_start_dt = None
        self.usage_end = None
        self.usage_end_dt = None

        # Current total
        self.total = 0

        # Current usage period lines
        self._usage_data = {}

    def add_writer(self, writer_class):
        writer = writer_class(self,
                              self._uid,
                              self._backend,
                              self._state_backend)
        self._write_pipeline.append(writer)

    def _gen_osrtf_filename(self, timeframe):
        if not isinstance(timeframe, datetime):
            raise TypeError('timeframe should be of type datetime.')
        date = '{}-{:02d}'.format(timeframe.year, timeframe.month)
        filename = '{}-osrtf-{}.zip'.format(self._uid, date)
        return filename

    def _update_state_manager(self):
        self._sm.set_state(self.usage_end)
        metadata = {'total': self.total}
        self._sm.set_metadata(metadata)

    def _get_state_manager_timeframe(self):
        timeframe = self._sm.get_state()
        self.usage_start = datetime.fromtimestamp(timeframe)
        self.usage_end = datetime.fromtimestamp(timeframe + self._period)
        metadata = self._sm.get_metadata()
        self.total = metadata.get('total', 0)

    def _filter_period(self, json_data):
        """
        Detect the best usage period to extract. Removes the usage from the
        json data and returns it.
        """
        candidate_ts = None
        candidate_idx = 0

        for idx, usage in enumerate(json_data):
            usage_ts = usage['period']['begin']
            if candidate_ts is None or usage_ts < candidate_ts:
                candidate_ts = usage_ts
                candidate_idx = idx

        if candidate_ts:
            return candidate_ts, json_data.pop(candidate_idx)['usage']

    def _format_data(self, timeframe, data):
        beg = utils.dt2ts(timeframe)
        end = beg + self._period
        final_data = {'period': {'begin': beg, 'end': end}}
        final_data['usage'] = data
        return [final_data]

    def _pre_commit(self):
        if self._osrtf is None:
            self._osrtf = OSRTFBackend(self._backend)
            filename = self._gen_osrtf_filename(self.usage_start_dt)
            self._osrtf.open(filename)

    def _commit(self):
        self._pre_commit()

        self._osrtf.add(self.usage_start_dt, self._usage_data)

        # Dispatch data to writing pipeline
        for backend in self._write_pipeline:
            backend.append(self._usage_data, self.usage_start, self.usage_end)

        self._update_state_manager()

        self._usage_data = {}

    def _dispatch(self, data):
        for service in data:
            if service in self._usage_data:
                self._usage_data[service].extend(data[service])
            else:
                self._usage_data[service] = data[service]
            # Update totals
            for entry in data[service]:
                self.total += entry['billing']['price']

    def get_timeframe(self, timeframe):
        if self._osrtf is None:
            self._osrtf = OSRTFBackend(self._backend)
            self._osrtf.open(self._gen_osrtf_filename(timeframe))
        data = self._osrtf.get(timeframe)
        return self._format_data(timeframe, data)

    def append(self, raw_data):
        while raw_data:
            usage_start, data = self._filter_period(raw_data)
            if self.usage_end is not None and usage_start >= self.usage_end:
                self._commit()
                self.usage_start = None

            if self.usage_start is None:
                self.usage_start = usage_start
                self.usage_end = usage_start + self._period
                self.usage_start_dt = datetime.fromtimestamp(self.usage_start)
                self.usage_end_dt = datetime.fromtimestamp(self.usage_end)

            self._dispatch(data)

    def commit(self):
        self._commit()

    def close(self):
        for writer in self._write_pipeline:
            writer.close()
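For reference, with a (hypothetical) user id 'demo' and a usage period starting 2014-05-01 00:00, _gen_osrtf_filename() yields 'demo-osrtf-2014-05.zip', and OSRTFBackend stores that hour's usage as the archive member '2014-05-01-0-0.json' (the day is zero-padded, the hour and minute are not).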
@@ -0,0 +1,130 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: writer/base.py
Author: Stephane Albert
Email: stephane.albert@objectif-libre.com
Github: http://github.com/objectiflibre
Description: CloudKitty, Report Writer base class
"""
from datetime import datetime
from cloudkitty.state import StateManager


class BaseReportWriter(object):
    """
    Base report writer.
    """
    report_type = None

    def __init__(self, write_orchestrator, user_id, backend, state_backend):
        self._write_orchestrator = write_orchestrator
        self._write_backend = backend
        self._uid = user_id
        self._sm = StateManager(state_backend, None, self._uid,
                                self.report_type)
        self._report = None
        self.period = 3600

        # State vars
        self.checked_first_line = False
        self.usage_start = None
        self.usage_start_dt = None
        self.usage_end = None
        self.usage_end_dt = None

        # Current total
        self.total = 0

        # Current usage period lines
        self._usage_data = {}

    def _gen_filename(self):
        raise NotImplementedError()

    def _open(self):
        filename = self._gen_filename()
        self._report = self._write_backend(filename, 'wb+')
        self._report.seek(0, 2)

    def _get_report_size(self):
        return self._report.tell()

    def _recover_state(self):
        raise NotImplementedError()

    def _update_state_manager(self):
        self._sm.set_state(self.usage_end)
        metadata = {'total': self.total}
        self._sm.set_metadata(metadata)

    def _get_state_manager_timeframe(self):
        timeframe = self._sm.get_state()
        self.usage_start = timeframe
        self.usage_start_dt = datetime.fromtimestamp(timeframe)
        self.usage_end = timeframe + self.period  # timestamp, comparable in append()
        metadata = self._sm.get_metadata()
        self.total = metadata.get('total', 0)

    def get_timeframe(self, timeframe):
        return self._write_orchestrator.get_timeframe(timeframe)

    def _write_header(self):
        raise NotImplementedError()

    def _write(self):
        raise NotImplementedError()

    def _pre_commit(self):
        if self._report is None:
            self._open()
            if not self.checked_first_line:
                if self._get_report_size() == 0:
                    self._write_header()
                else:
                    self._recover_state()
                self.checked_first_line = True

    def _commit(self):
        self._pre_commit()

        self._write()
        self._update_state_manager()

        self._post_commit()

    def _post_commit(self):
        self._usage_data = {}

    def _update(self, data):
        for service in data:
            if service in self._usage_data:
                self._usage_data[service].extend(data[service])
            else:
                self._usage_data[service] = data[service]
            # Update totals
            for entry in data[service]:
                self.total += entry['billing']['price']

    def append(self, data, start, end):
        # FIXME we should use the real time values
        if self.usage_end is not None and start >= self.usage_end:
            self._commit()
            self.usage_start = None

        if self.usage_start is None:
            self.usage_start = start
            self.usage_end = start + self.period
            self.usage_start_dt = datetime.fromtimestamp(self.usage_start)
            self.usage_end_dt = datetime.fromtimestamp(self.usage_end)

        self._update(data)

    def commit(self):
        self._commit()

    def _close_file(self):
        raise NotImplementedError()

    def close(self):
        self._close_file()
@@ -0,0 +1,73 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: writer/osrf.py
Author: Stephane Albert
Email: stephane.albert@objectif-libre.com
Github: http://github.com/objectiflibre
Description: CloudKitty, OpenStack native Report Format
"""
import json
from cloudkitty.writer.base import BaseReportWriter


class OSRFBackend(BaseReportWriter):
    """
    OpenStack Report Format Writer:
    Generates reports in the native (json) format.
    """
    report_type = 'osrf'

    def _gen_filename(self, timeframe):
        filename = '{}-osrf-{}-{:02d}.json'.format(self._uid,
                                                   timeframe.year,
                                                   timeframe.month)
        return filename

    def _open(self):
        filename = self._gen_filename(self.usage_start_dt)
        self._report = self._write_backend(filename, 'rb+')
        self._recover_state()

    def _write_header(self):
        self._report.write('[')

    def _write_total(self):
        total = {'total': self.total}
        self._report.write(json.dumps(total))

    def _recover_state(self):
        # Search backwards for the last comma (at most 2000 bytes from EOF)
        self._report.seek(0, 2)
        max_idx = self._report.tell()
        if max_idx > 2000:
            max_idx = 2000
        for idx in range(10, max_idx, 10):
            self._report.seek(-idx, 2)
            hay = self._report.read()
            if hay.count(','):
                break
        last_comma = hay.rfind(',')
        if last_comma > 0:
            last_comma -= len(hay)
        else:
            raise RuntimeError('Unable to recover file state.')
        self._report.seek(last_comma, 2)
        self._report.write(', ')
        self._report.truncate()

    def _close_file(self):
        if self._report is not None:
            self._recover_state()
            self._write_total()
            self._report.write(']')
            self._report.close()

    def _write(self):
        data = {}
        data['period'] = {'begin': self.usage_start_dt.isoformat(),
                          'end': self.usage_end_dt.isoformat()}
        data['usage'] = self._usage_data

        self._report.write(json.dumps(data))
        self._report.write(', ')
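The report produced by this writer is therefore a JSON array built incrementally: a '[' header, one object per committed period each followed by ', ', then on close a trailing total object and ']'. Roughly:

    [{"period": {"begin": "2014-05-01T00:00:00", "end": "2014-05-01T01:00:00"},
      "usage": {"compute": [...]}}, {"total": 12.3}]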
@@ -0,0 +1 @@
build
@@ -0,0 +1,177 @@
# Makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS    =
SPHINXBUILD   = sphinx-build
PAPER         =
BUILDDIR      = build

# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif

# Internal variables.
PAPEROPT_a4     = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source

.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext

help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo "  html       to make standalone HTML files"
	@echo "  dirhtml    to make HTML files named index.html in directories"
	@echo "  singlehtml to make a single large HTML file"
	@echo "  pickle     to make pickle files"
	@echo "  json       to make JSON files"
	@echo "  htmlhelp   to make HTML files and a HTML help project"
	@echo "  qthelp     to make HTML files and a qthelp project"
	@echo "  devhelp    to make HTML files and a Devhelp project"
	@echo "  epub       to make an epub"
	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
	@echo "  latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
	@echo "  text       to make text files"
	@echo "  man        to make manual pages"
	@echo "  texinfo    to make Texinfo files"
	@echo "  info       to make Texinfo files and run them through makeinfo"
	@echo "  gettext    to make PO message catalogs"
	@echo "  changes    to make an overview of all changed/added/deprecated items"
	@echo "  xml        to make Docutils-native XML files"
	@echo "  pseudoxml  to make pseudoxml-XML files for display purposes"
	@echo "  linkcheck  to check all external links for integrity"
	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"

clean:
	rm -rf $(BUILDDIR)/*

html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

singlehtml:
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in $(BUILDDIR)/htmlhelp."

qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/cloudkitty.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/cloudkitty.qhc"

devhelp:
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/cloudkitty"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/cloudkitty"
	@echo "# devhelp"

epub:
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."

latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	      "(use \`make latexpdf' here to do that automatically)."

latexpdf:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

latexpdfja:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through platex and dvipdfmx..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

text:
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."

man:
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."

texinfo:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo
	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
	@echo "Run \`make' in that directory to run these through makeinfo" \
	      "(use \`make info' here to do that automatically)."

info:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo "Running Texinfo files through makeinfo..."
	make -C $(BUILDDIR)/texinfo info
	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."

gettext:
	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
	@echo
	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."

changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."

linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	      "or in $(BUILDDIR)/linkcheck/output.txt."

doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	      "results in $(BUILDDIR)/doctest/output.txt."

xml:
	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
	@echo
	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."

pseudoxml:
	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
	@echo
	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
@@ -0,0 +1,258 @@
# -*- coding: utf-8 -*-
#
# cloudkitty documentation build configuration file, created by
# sphinx-quickstart on Wed May 14 23:05:42 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys
import os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'cloudkitty'
copyright = u'2014, Objectif Libre'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False


# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'cloudkittydoc'


# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
  ('index', 'cloudkitty.tex', u'cloudkitty Documentation',
   u'Objectif Libre', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'cloudkitty', u'cloudkitty Documentation',
     [u'Objectif Libre'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'cloudkitty', u'cloudkitty Documentation',
   u'Objectif Libre', 'cloudkitty', 'One line description of project.',
   'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
@@ -0,0 +1,22 @@
.. cloudkitty documentation master file, created by
   sphinx-quickstart on Wed May 14 23:05:42 2014.
   You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.

Welcome to cloudkitty's documentation!
======================================

Contents:

.. toctree::
   :maxdepth: 2



Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
@@ -0,0 +1,5 @@
python-ceilometerclient
python-keystoneclient
iso8601
oslo.config>=1.2.0
pbr>=0.6,<1.0
@@ -0,0 +1,20 @@
[metadata]
name = cloudkitty
version = 0.1
summary = OpenStack Billing and Usage Reporter

[global]
setup-hooks =
    pbr.hooks.setup_hook

[files]
packages = cloudkitty

[entry_points]
console_scripts =
    ckitty-orchestrator = cloudkitty.orchestrator:main

[build_sphinx]
all_files = 1
build-dir = doc/build
source-dir = doc/source
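With the console_scripts entry point above, pbr installs a ckitty-orchestrator command; since main() passes sys.argv to oslo.config, the usual configuration flags apply (the path below is only an example):

    ckitty-orchestrator --config-file /etc/cloudkitty/cloudkitty.conf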
@@ -0,0 +1,5 @@
import setuptools

setuptools.setup(
    setup_requires=['pbr'],
    pbr=True)
@@ -0,0 +1,7 @@
discover
testtools
testrepository
flake8
mock>=1.0
sphinx>=1.1.2,<1.2
oslosphinx
@@ -0,0 +1,6 @@
import testtools


class FakeTest(testtools.TestCase):
    def test_foo(self):
        pass
@@ -0,0 +1,27 @@
[tox]
minversion = 1.6
skipsdist = True
envlist = py26,py27,py33,pep8

[testenv]
setenv = VIRTUAL_ENV={envdir}
usedevelop = True
install_command = pip install {opts} {packages}

deps = -r{toxinidir}/requirements.txt
       -r{toxinidir}/test-requirements.txt
commands =
    python setup.py testr --testr-args='{posargs}'

[tox:jenkins]
downloadcache = ~/cache/pip

[testenv:pep8]
commands =
    flake8 {posargs} cloudkitty

[testenv:venv]
commands = {posargs}

[flake8]
exclude = .git,.venv,.tox,dist,doc,*egg,build