Add basic benchmark

Benchmark shows the performance penalty rootwrap introduces compared to
plain sudo.

To run benchmark, issue: tox -e benchmark

Example output:

Running 'ip a':
        method          :    min       avg       max       dev
                   ip a :   4.185ms   4.570ms   5.657ms 187.705us
              sudo ip a :  13.564ms  14.437ms  28.452ms   1.470ms
sudo rootwrap conf ip a : 148.839ms 192.424ms 254.043ms  19.219ms
Running 'ip netns exec bench_ns ip a':
                    method                     :    min       avg       max       dev
              sudo ip netns exec bench_ns ip a : 109.772ms 151.627ms 209.943ms  22.991ms
sudo rootwrap conf ip netns exec bench_ns ip a : 289.345ms 345.471ms 463.807ms  32.873ms

Change-Id: Id8e41be6602fa8dcff48a8a4ba44d35dd3043731
This commit is contained in:
Yuriy Taraday 2014-07-15 18:36:34 +04:00
parent e2e0c03f5a
commit 196bdc0fa2
4 changed files with 112 additions and 0 deletions

101
benchmark/benchmark.py Normal file
View File

@ -0,0 +1,101 @@
# Copyright (c) 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import atexit
import math
import os
import subprocess
import sys
import timeit
# Path of the rootwrap configuration file, relative to this script's
# directory (main() chdirs there before running the benchmarks).
config_path = "rootwrap.conf"
# Number of timed executions per command/runner combination.
num_iterations = 100
def run_plain(cmd):
    """Execute *cmd* directly and capture its output.

    :param cmd: command to run, as an argv list.
    :returns: a ``(returncode, stdout, stderr)`` tuple.
    """
    proc = subprocess.Popen(cmd,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    return proc.returncode, stdout, stderr
def run_sudo(cmd):
    """Execute *cmd* with root privileges via sudo.

    :returns: a ``(returncode, stdout, stderr)`` tuple from run_plain.
    """
    sudo_cmd = ["sudo"]
    sudo_cmd.extend(cmd)
    return run_plain(sudo_cmd)
def run_rootwrap(cmd):
    """Execute *cmd* through sudo + oslo.rootwrap using the local config.

    Spawns the rootwrap entry point in a fresh interpreter so the full
    startup cost (interpreter + config parsing) is included in timings.
    """
    wrapper = ["sudo", sys.executable, "-c",
               "from oslo.rootwrap import cmd; cmd.main()", config_path]
    return run_plain(wrapper + cmd)
def run_one(runner, cmd):
    """Wrap one (runner, cmd) pair in a zero-argument callable for timeit.

    :param runner: callable taking an argv list and returning a
        ``(returncode, stdout, stderr)`` tuple.
    :param cmd: command to benchmark, as an argv list.
    :returns: a callable that runs the command once and raises
        AssertionError if it exits non-zero or writes to stderr.
    """
    def __inner():
        code, out, err = runner(cmd)
        # communicate() returns bytes on Python 3 (str on Python 2), so
        # compare by truthiness instead of against "" — the original
        # `err == ""` was always False for bytes, and `"..." + err`
        # raised TypeError when building the message.
        assert not err, "Stderr not empty:\n{0}".format(err)
        assert code == 0, "Command failed"
    return __inner
# (display-name template, runner) pairs benchmarked by run_bench();
# the "{0}" placeholder is filled with the command string when the
# results table is printed.
runners = [
    ("{0}", run_plain),
    ("sudo {0}", run_sudo),
    ("sudo rootwrap conf {0}", run_rootwrap),
]
def get_time_string(sec):
    """Render a duration in seconds as a fixed-width human-readable string.

    Picks seconds, milliseconds or microseconds so the value stays in a
    readable range; the result is always 9 characters wide.
    """
    if sec > 0.9:
        value, unit = sec, "s "
    elif sec > 0.0009:
        value, unit = sec * 1000.0, "ms"
    else:
        value, unit = sec * 1000000.0, "us"
    return "{0:7.3f}{1}".format(value, unit)
def run_bench(cmd, runners):
    """Time *cmd* under each runner and print a min/avg/max/dev table.

    :param cmd: command to benchmark, as an argv list.
    :param runners: iterable of (name template, runner) pairs; each
        template's "{0}" placeholder is filled with the command string.
    """
    strcmd = ' '.join(cmd)
    # Width of the widest rendered row label; "- 3" compensates for the
    # three-character "{0}" placeholder that strcmd replaces.
    max_name_len = max(len(name) for name, _ in runners) + len(strcmd) - 3
    print("Running '{0}':".format(strcmd))
    print("{0:^{1}} :".format("method", max_name_len),
          "".join(map("{0:^10}".format, ["min", "avg", "max", "dev"])))
    for name, runner in runners:
        # number=1 so every sample measures exactly one command execution.
        results = timeit.repeat(run_one(runner, cmd), repeat=num_iterations,
                                number=1)
        avg = sum(results) / num_iterations
        min_ = min(results)
        max_ = max(results)
        # Population standard deviation of the collected samples.
        dev = math.sqrt(sum((r - avg) ** 2 for r in results) / num_iterations)
        print("{0:>{1}} :".format(name.format(strcmd), max_name_len),
              " ".join(map(get_time_string, [min_, avg, max_, dev])))
def main():
    """Benchmark plain, sudo and rootwrap invocations of 'ip' commands."""
    # dirname() returns "" when the script is invoked from its own
    # directory and os.chdir("") raises OSError; abspath() guarantees a
    # usable directory so config_path/filters resolve correctly.
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    # Refresh cached sudo credentials up front so a password prompt does
    # not land in the middle of the timed runs.
    code, _, _ = run_sudo(["-vn"])
    if code:
        print("We need you to authorize with sudo to run this benchmark")
        run_sudo(["-v"])
    run_bench(["ip", "a"], runners)
    # Benchmark inside a throwaway network namespace, removed at exit.
    run_sudo(["ip", "netns", "add", "bench_ns"])
    atexit.register(run_sudo, ["ip", "netns", "delete", "bench_ns"])
    # The plain (non-sudo) runner cannot enter the namespace, skip it.
    run_bench('ip netns exec bench_ns ip a'.split(), runners[1:])
# Script entry point: run the benchmark when executed directly.
if __name__ == "__main__":
    main()

View File

@ -0,0 +1,4 @@
[Filters]
ip: IpFilter, ip, root
ip_exec: IpNetnsExecFilter, ip, root

4
benchmark/rootwrap.conf Normal file
View File

@ -0,0 +1,4 @@
[DEFAULT]
filters_path=filters.d
exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin
use_syslog=False

View File

@ -26,3 +26,6 @@ commands = {posargs}
show-source = True show-source = True
exclude = .tox,dist,doc,*.egg,build exclude = .tox,dist,doc,*.egg,build
builtins = _ builtins = _
[testenv:benchmark]
commands = python benchmark/benchmark.py