# -*- encoding: utf-8 -*-
#
# Copyright © 2016 Red Hat, Inc.
# Copyright © 2014-2015 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Time series data manipulation, better with pancetta."""

import datetime
import functools
import logging
import math
import numbers
import random
import re
import struct
import time

import lz4.block
import numpy
import numpy.lib.recfunctions
import pandas
from scipy import ndimage
import six

# NOTE(sileht): pandas relies on time.strptime()
# and often triggers http://bugs.python.org/issue7980
# It's due to our heavy thread usage; this is the workaround
# to ensure the module is correctly loaded before we really use it.
time.strptime("2016-02-19", "%Y-%m-%d")

LOG = logging.getLogger(__name__)


class NoDeloreanAvailable(Exception):
    """Error raised when trying to insert a value that is too old."""

    def __init__(self, first_timestamp, bad_timestamp):
        self.first_timestamp = first_timestamp
        self.bad_timestamp = bad_timestamp
        super(NoDeloreanAvailable, self).__init__(
            "%s is before %s" % (bad_timestamp, first_timestamp))


class BeforeEpochError(Exception):
    """Error raised when a timestamp before Epoch is used."""

    def __init__(self, timestamp):
        self.timestamp = timestamp
        super(BeforeEpochError, self).__init__(
            "%s is before Epoch" % timestamp)


class UnAggregableTimeseries(Exception):
    """Error raised when timeseries cannot be aggregated."""

    def __init__(self, reason):
        self.reason = reason
        super(UnAggregableTimeseries, self).__init__(reason)


class UnknownAggregationMethod(Exception):
    """Error raised when the aggregation method is unknown."""

    def __init__(self, agg):
        self.aggregation_method = agg
        super(UnknownAggregationMethod, self).__init__(
            "Unknown aggregation method `%s'" % agg)


class InvalidData(ValueError):
    """Error raised when data are corrupted."""

    def __init__(self):
        super(InvalidData, self).__init__("Unable to unpack, invalid data")


def round_timestamp(ts, freq):
    return pandas.Timestamp(
        (pandas.Timestamp(ts).value // freq) * freq)


class GroupedTimeSeries(object):
    def __init__(self, ts, granularity):
        # NOTE(sileht): The whole class assumes ts is ordered and has no
        # duplicate timestamps. It uses numpy.unique, which returns a sorted
        # list, but we always assume the order to be the same as the input.
        freq = granularity * 10e8
        self._ts = ts
        self.indexes = (numpy.array(ts.index, 'float') // freq) * freq
        self.tstamps, self.counts = numpy.unique(self.indexes,
                                                 return_counts=True)

    def mean(self):
        return self._scipy_aggregate(ndimage.mean)

    def sum(self):
        return self._scipy_aggregate(ndimage.sum)

    def min(self):
        return self._scipy_aggregate(ndimage.minimum)

    def max(self):
        return self._scipy_aggregate(ndimage.maximum)

    def median(self):
        return self._scipy_aggregate(ndimage.median)

    def std(self):
        # NOTE(sileht): ndimage.standard_deviation is more performant, but it
        # uses ddof=0; to get the same result as pandas we have to use
        # ddof=1. If one day scipy allows to pass ddof, this should be
        # changed.
        return self._scipy_aggregate(ndimage.labeled_comprehension,
                                     remove_unique=True,
                                     func=functools.partial(numpy.std,
                                                            ddof=1),
                                     out_dtype='float64',
                                     default=None)

    def _count(self):
        timestamps = numpy.array(self.tstamps, 'datetime64[ns]')
        return (self.counts, timestamps)

    def count(self):
        return pandas.Series(*self._count())

    def last(self):
        counts, timestamps = self._count()
        cumcounts = numpy.cumsum(counts) - 1
        values = self._ts.values[cumcounts]
        return pandas.Series(values, pandas.to_datetime(timestamps))

    def first(self):
        counts, timestamps = self._count()
        counts = numpy.insert(counts[:-1], 0, 0)
        cumcounts = numpy.cumsum(counts)
        values = self._ts.values[cumcounts]
        return pandas.Series(values, pandas.to_datetime(timestamps))

    def quantile(self, q):
        return self._scipy_aggregate(ndimage.labeled_comprehension,
                                     func=functools.partial(
                                         numpy.percentile,
                                         q=q,
                                     ),
                                     out_dtype='float64',
                                     default=None)

    def _scipy_aggregate(self, method, remove_unique=False, *args, **kwargs):
        if remove_unique:
            tstamps = self.tstamps[self.counts > 1]
        else:
            tstamps = self.tstamps

        if len(tstamps) == 0:
            return pandas.Series()

        values = method(self._ts.values, self.indexes, tstamps,
                        *args, **kwargs)
        timestamps = numpy.array(tstamps, 'datetime64[ns]')
        return pandas.Series(values, pandas.to_datetime(timestamps))
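

# The helper below is an illustrative sketch only, not part of the library
# API: it shows how GroupedTimeSeries buckets a pre-sorted, duplicate-free
# pandas.Series by a granularity expressed in seconds and aggregates each
# bucket through scipy.ndimage. The helper name and the sample data are
# invented for the example.
def _example_grouped_timeseries():
    ts = pandas.Series(
        [1.0, 2.0, 3.0, 4.0],
        pandas.to_datetime(["2016-02-19T12:00:05", "2016-02-19T12:00:35",
                            "2016-02-19T12:01:05", "2016-02-19T12:01:35"]))
    grouped = GroupedTimeSeries(ts, 60)
    # Two one-minute buckets, each aggregating two raw points.
    return grouped.mean(), grouped.max(), grouped.count()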


class TimeSerie(object):
    """A representation of a series of timestamps with values.

    Duplicate timestamps are not allowed and will be filtered to use the
    last in the group when the TimeSerie is created or extended.
    """

    def __init__(self, ts=None):
        if ts is None:
            ts = pandas.Series()
        self.ts = ts

    @staticmethod
    def clean_ts(ts):
        if ts.index.has_duplicates:
            ts = ts[~ts.index.duplicated(keep='last')]

        if not ts.index.is_monotonic:
            ts = ts.sort_index()

        return ts

    @classmethod
    def from_data(cls, timestamps=None, values=None, clean=False):
        ts = pandas.Series(values, timestamps)
        if clean:
            # For format v2
            ts = cls.clean_ts(ts)
        return cls(ts)

    @classmethod
    def from_tuples(cls, timestamps_values):
        return cls.from_data(*zip(*timestamps_values))

    def __eq__(self, other):
        return (isinstance(other, TimeSerie)
                and self.ts.all() == other.ts.all())

    def __getitem__(self, key):
        return self.ts[key]

    def set_values(self, values):
        t = pandas.Series(*reversed(list(zip(*values))))
        self.ts = self.clean_ts(t).combine_first(self.ts)

    def __len__(self):
        return len(self.ts)

    @staticmethod
    def _timestamps_and_values_from_dict(values):
        timestamps = numpy.array(list(values.keys()),
                                 dtype='datetime64[ns]')
        timestamps = pandas.to_datetime(timestamps)
        v = list(values.values())
        if v:
            return timestamps, v
        return (), ()

    @staticmethod
    def _to_offset(value):
        if isinstance(value, numbers.Real):
            return pandas.tseries.offsets.Nano(value * 10e8)
        return pandas.tseries.frequencies.to_offset(value)

    @property
    def first(self):
        try:
            return self.ts.index[0]
        except IndexError:
            return

    @property
    def last(self):
        try:
            return self.ts.index[-1]
        except IndexError:
            return

    def group_serie(self, granularity, start=0):
        # NOTE(jd) Our whole serialization system is based on Epoch, and we
        # store unsigned integers, so we can't store anything before Epoch.
        # Sorry!
        if self.ts.index[0].value < 0:
            raise BeforeEpochError(self.ts.index[0])

        return GroupedTimeSeries(self.ts[start:], granularity)

    @staticmethod
    def _compress(payload):
        # FIXME(jd) lz4 > 0.9.2 returns bytearray instead of bytes. But Cradox
        # does not accept bytearray, only bytes, so make sure that we return
        # a bytes type.
        return memoryview(lz4.block.compress(payload)).tobytes()
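

# Illustrative sketch only, not part of the library API: it demonstrates the
# duplicate handling documented on TimeSerie. set_values() runs the new
# points through clean_ts(), so when the same timestamp is given twice the
# last value wins, and the result is merged into the existing serie. The
# helper name and the sample data are invented for the example.
def _example_timeserie_dedup():
    serie = TimeSerie.from_data(
        timestamps=pandas.to_datetime(["2016-02-19T12:00:00",
                                       "2016-02-19T12:01:00"]),
        values=[5.0, 9.0])
    serie.set_values([
        (pandas.Timestamp("2016-02-19T12:02:00"), 11.0),
        (pandas.Timestamp("2016-02-19T12:02:00"), 13.0),  # last one wins
    ])
    # Three points remain: 12:00, 12:01 and 12:02 (valued 13.0).
    return len(serie), serie[pandas.Timestamp("2016-02-19T12:02:00")]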


class BoundTimeSerie(TimeSerie):
    def __init__(self, ts=None, block_size=None, back_window=0):
        """A time serie that is limited in size.

        Used to represent the full-resolution buffer of incoming raw
        datapoints associated with a metric.

        The maximum size of this time serie is expressed as a number of
        blocks, called the back window. When the timeserie is truncated, a
        whole block is removed.

        You cannot set a value using a timestamp that is prior to the last
        timestamp minus this number of blocks. By default, a back window of 0
        does not allow you to go back in time prior to the current block
        being used.
        """
        super(BoundTimeSerie, self).__init__(ts)
        self.block_size = self._to_offset(block_size)
        self.back_window = back_window
        self._truncate()

    @classmethod
    def from_data(cls, timestamps=None, values=None,
                  block_size=None, back_window=0):
        return cls(pandas.Series(values, timestamps),
                   block_size=block_size, back_window=back_window)

    def __eq__(self, other):
        return (isinstance(other, BoundTimeSerie)
                and super(BoundTimeSerie, self).__eq__(other)
                and self.block_size == other.block_size
                and self.back_window == other.back_window)

    def set_values(self, values, before_truncate_callback=None,
                   ignore_too_old_timestamps=False):
        # NOTE: values must be sorted when passed in.
        if self.block_size is not None and not self.ts.empty:
            first_block_timestamp = self.first_block_timestamp()
            if ignore_too_old_timestamps:
                for index, (timestamp, value) in enumerate(values):
                    if timestamp >= first_block_timestamp:
                        values = values[index:]
                        break
                else:
                    values = []
            else:
                # Check that the smallest timestamp does not go too much back
                # in time.
                smallest_timestamp = values[0][0]
                if smallest_timestamp < first_block_timestamp:
                    raise NoDeloreanAvailable(first_block_timestamp,
                                              smallest_timestamp)
        super(BoundTimeSerie, self).set_values(values)
        if before_truncate_callback:
            before_truncate_callback(self)
        self._truncate()

    _SERIALIZATION_TIMESTAMP_VALUE_LEN = struct.calcsize("<Qd")


class AggregatedTimeSerie(TimeSerie):

    _AGG_METHOD_PCT_RE = re.compile(r"([1-9][0-9]?)pct")

    PADDED_SERIAL_LEN = struct.calcsize("<?d")
    COMPRESSED_SERIAL_LEN = struct.calcsize("<Hd")
    COMPRESSED_TIMESPAMP_LEN = struct.calcsize("<H")

    def __repr__(self):
        return "<%s 0x%x sampling=%s max_size=%s agg_method=%s>" % (
            self.__class__.__name__,
            id(self),
            self.sampling,
            self.max_size,
            self.aggregation_method,
        )

    @staticmethod
    def is_compressed(serialized_data):
        """Check whether the data was serialized with compression."""
        return six.indexbytes(serialized_data, 0) == ord("c")

    @classmethod
    def unserialize(cls, data, start, agg_method, sampling):
        x, y = [], []
        start = float(start)
        if data:
            if cls.is_compressed(data):
                # Compressed format
                uncompressed = lz4.block.decompress(
                    memoryview(data)[1:].tobytes())
                nb_points = len(uncompressed) // cls.COMPRESSED_SERIAL_LEN
                timestamps_raw = uncompressed[
                    :nb_points*cls.COMPRESSED_TIMESPAMP_LEN]
                try:
                    y = numpy.frombuffer(timestamps_raw, dtype='<H')
                except ValueError:
                    raise InvalidData()


def aggregated(timeseries, aggregation, from_timestamp=None,
               to_timestamp=None, needed_percent_of_overlap=100.0):
    ...
    if (needed_percent_of_overlap > 0 and
            (right_boundary_ts == left_boundary_ts
             or (right_boundary_ts is None
                 and maybe_next_timestamp_is_left_boundary))):
        LOG.debug("We didn't find points that overlap in those "
                  "timeseries. "
                  "right_boundary_ts=%(right_boundary_ts)s, "
                  "left_boundary_ts=%(left_boundary_ts)s, "
                  "groups=%(groups)s", {
                      'right_boundary_ts': right_boundary_ts,
                      'left_boundary_ts': left_boundary_ts,
                      'groups': list(grouped)
                  })
        raise UnAggregableTimeseries('No overlap')

    # NOTE(sileht): this calls the aggregation method on already aggregated
    # values; for some kinds of aggregation the result can look weird, but
    # this is the best we can do because we no longer have the raw
    # datapoints in that case.
    # FIXME(sileht): so should we bail out in case of stddev, percentile
    # and median?
    agg_timeserie = getattr(grouped, aggregation)()
    agg_timeserie = agg_timeserie.dropna().reset_index()

    if from_timestamp is None and left_boundary_ts:
        agg_timeserie = agg_timeserie[
            agg_timeserie['timestamp'] >= left_boundary_ts]
    if to_timestamp is None and right_boundary_ts:
        agg_timeserie = agg_timeserie[
            agg_timeserie['timestamp'] <= right_boundary_ts]

    points = (agg_timeserie.sort_values(by=['granularity', 'timestamp'],
                                        ascending=[0, 1]).itertuples())

    return [(timestamp, granularity, value)
            for __, timestamp, granularity, value in points]
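

# Illustrative sketch only, not part of the library API: it exercises the
# back window behaviour described in the BoundTimeSerie docstring. With
# back_window=0, a point older than the block currently being filled raises
# NoDeloreanAvailable unless ignore_too_old_timestamps=True is passed. The
# helper name, the '1min' block size and the sample data are invented for
# the example.
def _example_bound_timeserie_back_window():
    serie = BoundTimeSerie(block_size='1min', back_window=0)
    serie.set_values([(pandas.Timestamp("2016-02-19T12:00:10"), 1.0),
                      (pandas.Timestamp("2016-02-19T12:01:10"), 2.0)])
    try:
        # 11:59:50 is before the current block, so it is refused...
        serie.set_values([(pandas.Timestamp("2016-02-19T11:59:50"), 3.0)])
    except NoDeloreanAvailable:
        pass
    # ...unless too-old points are explicitly ignored, in which case they
    # are silently dropped.
    serie.set_values([(pandas.Timestamp("2016-02-19T11:59:50"), 3.0)],
                     ignore_too_old_timestamps=True)
    return serie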
" "right_boundary_ts=%(right_boundary_ts)s, " "left_boundary_ts=%(left_boundary_ts)s, " "groups=%(groups)s", { 'right_boundary_ts': right_boundary_ts, 'left_boundary_ts': left_boundary_ts, 'groups': list(grouped) }) raise UnAggregableTimeseries('No overlap') # NOTE(sileht): this call the aggregation method on already # aggregated values, for some kind of aggregation this can # result can looks weird, but this is the best we can do # because we don't have anymore the raw datapoints in those case. # FIXME(sileht): so should we bailout is case of stddev, percentile # and median? agg_timeserie = getattr(grouped, aggregation)() agg_timeserie = agg_timeserie.dropna().reset_index() if from_timestamp is None and left_boundary_ts: agg_timeserie = agg_timeserie[ agg_timeserie['timestamp'] >= left_boundary_ts] if to_timestamp is None and right_boundary_ts: agg_timeserie = agg_timeserie[ agg_timeserie['timestamp'] <= right_boundary_ts] points = (agg_timeserie.sort_values(by=['granularity', 'timestamp'], ascending=[0, 1]).itertuples()) return [(timestamp, granularity, value) for __, timestamp, granularity, value in points] if __name__ == '__main__': import sys args = sys.argv[1:] if not args or "--boundtimeserie" in args: BoundTimeSerie.benchmark() if not args or "--aggregatedtimeserie" in args: AggregatedTimeSerie.benchmark()