fix tox python3 overrides
We want to default to running all tox environments under Python 3, so set the basepython value in each environment. We do not specify a minor version number, because we do not want to update the file every time we upgrade Python. We also do not set the override once in the base [testenv] section, because that breaks the more specific interpreter versions used by the default environments such as py35 and py36.

Change-Id: I12967d5f5e707efe2b271b28bc7ea4b40e7f1c15
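The resulting pattern looks like this minimal tox.ini sketch (only the shape matters; the section contents are illustrative, not the full file):

    [testenv]
    # no basepython here: py35/py36 environments keep their implied interpreters

    [testenv:pep8]
    basepython = python3
    commands =
      {[testenv:flake8]commands}

With this layout, tox -e pep8 runs under whatever python3 is on the PATH, without pinning a minor version.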
@@ -16,6 +16,7 @@ import os
 import re
 
 from datetime import datetime
+from six import text_type
 
 import monasca_agent.collector.checks as checks
 
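The code portion of the change applies the usual six-based Python 2/3 compatibility substitutions, of which this hunk is the first. A minimal sketch of the idioms used across the diff (illustrative snippets, not lines from the repository):

    from six import integer_types, moves, text_type

    text_type(u'x')            # replaces the Py2-only unicode(...)
    int('42')                  # replaces long(...); Py3 int is arbitrary-precision
    (float,) + integer_types   # replaces (float, int, long)
    list(moves.range(3))       # replaces the Py2-only xrange(...)
    open('/proc/filesystems')  # replaces the Py2-only file(...) builtin

    try:
        raise IOError('boom')
    except IOError as err:     # replaces the Py2-only 'except IOError, (err):'
        pass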
@@ -77,7 +78,7 @@ class Crash(checks.AgentCheck):
 
         # Return the date-/timestamp of the most recent crash
         if dump_count == 1:
-            value_meta = {'latest': unicode(dt)}
+            value_meta = {'latest': text_type(dt)}
 
         log.debug('dump_count: %s', dump_count)
         self.gauge('crash.dump_count', dump_count, dimensions=dimensions,
@@ -147,7 +147,7 @@ class Disk(checks.AgentCheck):
 
         # automatically ignore filesystems not backed by a device
         try:
-            for nodevfs in filter(lambda x: x.startswith('nodev\t'), file('/proc/filesystems')):
+            for nodevfs in filter(lambda x: x.startswith('nodev\t'), open('/proc/filesystems')):
                 file_system_list.add(nodevfs.partition('\t')[2].strip())
         except IOError:
             log.debug('Failed reading /proc/filesystems')
@@ -204,10 +204,10 @@ class Docker(checks.AgentCheck):
                     container_network_dimensions['interface'] = interface_name
                     network_values = cols[1].split()
                     self._report_rate_gauge_metric(
-                        "container.net.in_bytes", long(
+                        "container.net.in_bytes", int(
                             network_values[0]), container_network_dimensions)
                     self._report_rate_gauge_metric(
-                        "container.net.out_bytes", long(
+                        "container.net.out_bytes", int(
                             network_values[8]), container_network_dimensions)
                     break
         except Exception as e:
@@ -16,6 +16,8 @@ from glob import glob
 import os
 import time
 
+from six import moves
+
 try:
     from xml.etree.ElementTree import ElementTree
 except ImportError:
@@ -101,7 +103,7 @@ class Jenkins(AgentCheck):
         if len(dirs) > 0:
             dirs = sorted(dirs, reverse=True)
             # We try to get the last valid build
-            for index in xrange(0, len(dirs) - 1):
+            for index in moves.range(0, len(dirs) - 1):
                 dir_name = dirs[index]
                 try:
                     timestamp = self._extract_timestamp(dir_name)
@@ -52,7 +52,7 @@ class WrapMK(AgentCheck):
         s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
         try:
             s.connect(socket_path)
-        except socket.error, (err):
+        except socket.error as err:
             self.log.error("Error connecting to {0}: {1}".format(socket_path,
                                                                  str(err)))
             sys.exit(1)
@@ -17,8 +17,9 @@ import subprocess
 import sys
 import traceback
 
-import monasca_agent.collector.checks as checks
+from six import text_type
 
+import monasca_agent.collector.checks as checks
 
 GAUGE = "gauge"
 RATE = "rate"
@@ -254,7 +255,7 @@ class MySql(checks.AgentCheck):
         return self._collect_type(key, dict, float)
 
     def _collect_string(self, key, dict):
-        return self._collect_type(key, dict, unicode)
+        return self._collect_type(key, dict, text_type)
 
     def _collect_type(self, key, dict, the_type):
         self.log.debug("Collecting data with %s" % key)
@@ -307,7 +308,7 @@ class MySql(checks.AgentCheck):
     def _collect_system_metrics(self, host, db, dimensions):
         pid = None
         # The server needs to run locally, accessed by TCP or socket
-        if host in ["localhost", "127.0.0.1"] or db.port == long(0):
+        if host in ["localhost", "127.0.0.1"] or db.port == int(0):
             pid = self._get_server_pid(db)
 
         if pid:
@@ -35,9 +35,9 @@ class Varnish(AgentCheck):
         if name == "stat":
             m_name = self.normalize(self._current_metric)
             if self._current_type in ("a", "c"):
-                self.rate(m_name, long(self._current_value))
+                self.rate(m_name, int(self._current_value))
             elif self._current_type in ("i", "g"):
-                self.gauge(m_name, long(self._current_value))
+                self.gauge(m_name, int(self._current_value))
             else:
                 # Unsupported data type, ignore
                 self._reset()
@@ -53,7 +53,7 @@ class Varnish(AgentCheck):
         data = data.strip()
         if len(data) > 0 and self._current_element != "":
             if self._current_element == "value":
-                self._current_value = long(data)
+                self._current_value = int(data)
             elif self._current_element == "flag":
                 self._current_type = data
             else:
@@ -222,8 +222,8 @@ class VCenterCheck(AgentCheck):
     def _process_storage_data(self, datastore_stats, managed_cluster):
         shared_ds = self._get_shared_datastores(datastore_stats,
                                                 managed_cluster)
-        capacity = 0L
-        freeSpace = 0L
+        capacity = 0
+        freeSpace = 0
         self.log.info("Polling for the datastores: " + str(shared_ds))
         for object_contents in datastore_stats:
             for object_content in object_contents[1]:
@@ -237,12 +237,12 @@ class VCenterCheck(AgentCheck):
                             "of datastore: %s in cluster: "
                             "%s" %
                             (ds_mor, managed_cluster))
-                        capacity += long(propSet.val)
+                        capacity += int(propSet.val)
                     elif propSet.name == 'summary.freeSpace':
                         self.log.debug("Calculating freeSpace of "
                                        "datastore: %s in cluster: %s"
                                        % (ds_mor, managed_cluster))
-                        freeSpace += long(propSet.val)
+                        freeSpace += int(propSet.val)
         usedSpace = capacity - freeSpace
         self.log.debug("Total capacity:" + str(capacity) +
                        " used:" + str(usedSpace) + " free:" + str(freeSpace))
@@ -136,11 +136,11 @@ class Zookeeper(AgentCheck):
 
         # Received: 101032173
         _, value = buf.readline().split(':')
-        metrics.append(('zookeeper.in_bytes', long(value.strip())))
+        metrics.append(('zookeeper.in_bytes', int(value.strip())))
 
         # Sent: 1324
         _, value = buf.readline().split(':')
-        metrics.append(('zookeeper.out_bytes', long(value.strip())))
+        metrics.append(('zookeeper.out_bytes', int(value.strip())))
 
         if has_connections_val:
             # Connections: 1
@@ -153,12 +153,12 @@ class Zookeeper(AgentCheck):
 
         # Outstanding: 0
         _, value = buf.readline().split(':')
-        metrics.append(('zookeeper.outstanding_bytes', long(value.strip())))
+        metrics.append(('zookeeper.outstanding_bytes', int(value.strip())))
 
         # Zxid: 0x1034799c7
         _, value = buf.readline().split(':')
         # Parse as a 64 bit hex int
-        zxid = long(value.strip(), 16)
+        zxid = int(value.strip(), 16)
         # convert to bytes
         zxid_bytes = struct.pack('>q', zxid)
         # the higher order 4 bytes is the epoch
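For reference, the epoch extraction described by the comments in this hunk works as in this small sketch (the zxid_epoch helper and the sample value are hypothetical, built from the comments above):

    import struct

    def zxid_epoch(zxid_hex):
        # Parse the zxid as a 64-bit integer from its hex form
        zxid = int(zxid_hex.strip(), 16)
        # Pack big-endian signed 64-bit; the high-order 4 bytes are the epoch
        zxid_bytes = struct.pack('>q', zxid)
        (epoch,) = struct.unpack('>i', zxid_bytes[0:4])
        return epoch

    print(zxid_epoch('0x1034799c7'))  # -> 1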
@@ -175,6 +175,6 @@ class Zookeeper(AgentCheck):
 
         # Node count: 487
         _, value = buf.readline().split(':')
-        metrics.append(('zookeeper.node_count', long(value.strip())))
+        metrics.append(('zookeeper.node_count', int(value.strip())))
 
         return metrics, dimensions
@@ -103,7 +103,7 @@ class JMXFetch(object):
             'timestamp': time.time(),
             'invalid_checks': invalid_checks
         }
-        stream = file(os.path.join(tempfile.gettempdir(), PYTHON_JMX_STATUS_FILE), 'w')
+        stream = open(os.path.join(tempfile.gettempdir(), PYTHON_JMX_STATUS_FILE), 'w')
         yaml.dump(data, stream)
         stream.close()
 
@@ -14,6 +14,7 @@
 # under the License.
 """Implementation of Inspector abstraction for Hyper-V"""
 
+
 from oslo_utils import units
 
 from monasca_agent.collector.virt.hyperv import utilsv2
@@ -40,7 +41,7 @@ class HyperVInspector(virt_inspector.Inspector):
         cpu_percent_used = (cpu_clock_used /
                             float(host_cpu_clock * cpu_count))
         # Nanoseconds
-        cpu_time = (long(uptime * cpu_percent_used) * units.k)
+        cpu_time = (int(uptime * cpu_percent_used) * units.k)
 
         return virt_inspector.CPUStats(number=cpu_count, time=cpu_time)
 
@@ -86,11 +86,11 @@ class UtilsV2(object):
 
         cpu_used = 0
         if cpu_metric_aggr:
-            cpu_used = long(cpu_metric_aggr[0].MetricValue)
+            cpu_used = int(cpu_metric_aggr[0].MetricValue)
 
         return (cpu_used,
                 int(cpu_sd.VirtualQuantity),
-                long(vm.OnTimeInMilliseconds))
+                int(vm.OnTimeInMilliseconds))
 
     def get_memory_metrics(self, vm_name):
         vm = self._lookup_vm(vm_name)
@@ -98,7 +98,7 @@ class UtilsV2(object):
         metric_memory = self._get_metrics(vm, memory_def)
         memory_usage = 0
         if metric_memory:
-            memory_usage = long(metric_memory[0].MetricValue)
+            memory_usage = int(metric_memory[0].MetricValue)
         return memory_usage
 
     def get_vnic_metrics(self, vm_name):
@@ -180,7 +180,7 @@ class UtilsV2(object):
     def _sum_metric_values(metrics):
         tot_metric_val = 0
         for metric in metrics:
-            tot_metric_val += long(metric.MetricValue)
+            tot_metric_val += int(metric.MetricValue)
         return tot_metric_val
 
     def _sum_metric_values_by_defs(self, element_metrics, metric_defs):
@@ -129,7 +129,7 @@ class XenapiInspector(virt_inspector.Inspector):
         metrics_rec = self._call_xenapi("VM_metrics.get_record",
                                         metrics_ref)
         # Stat provided from XenServer is in B, converting it to MB.
-        memory = long(metrics_rec['memory_actual']) / units.Mi
+        memory = int(metrics_rec['memory_actual']) / units.Mi
         return virt_inspector.MemoryUsageStats(usage=memory)
 
     def inspect_vnic_rates(self, instance, duration=None):
@@ -132,9 +132,9 @@ class Daemon(object):
         # Redirect standard file descriptors
         sys.stdout.flush()
         sys.stderr.flush()
-        si = file(self.stdin, 'r')
-        so = file(self.stdout, 'a+')
-        se = file(self.stderr, 'a+', 0)
+        si = open(self.stdin, 'r')
+        so = open(self.stdout, 'a+')
+        se = open(self.stderr, 'a+', 0)
         os.dup2(si.fileno(), sys.stdin.fileno())
         os.dup2(so.fileno(), sys.stdout.fileno())
         os.dup2(se.fileno(), sys.stderr.fileno())
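One caveat with the file() to open() substitution above: on Python 3, open(path, 'a+', 0) raises ValueError, because unbuffered I/O is only permitted in binary mode. A sketch of a variant that behaves the same on both interpreters (the path is hypothetical; assumes binary output is acceptable for the redirected stream):

    import os
    import sys

    # buffering=0 requires binary mode on Python 3; 'a+b' works on both 2 and 3
    se = open('/var/log/agent.err', 'a+b', 0)
    os.dup2(se.fileno(), sys.stderr.fileno())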
@@ -251,7 +251,7 @@ class Daemon(object):
     def pid(self):
         # Get the pid from the pidfile
         try:
-            pf = file(self.pidfile, 'r')
+            pf = open(self.pidfile, 'r')
             pid = int(pf.read().strip())
             pf.close()
             return pid
@@ -32,6 +32,7 @@ import uuid
 
 import logging
 import logging.handlers
+from six import integer_types
 
 from numbers import Number
 
@@ -44,7 +45,7 @@ VALID_HOSTNAME_RFC_1123_PATTERN = re.compile(
 MAX_HOSTNAME_LEN = 255
 LOGGING_MAX_BYTES = 5 * 1024 * 1024
 
-NumericTypes = (float, int, long)
+NumericTypes = (float,) + integer_types
 
 import monasca_agent.common.config as configuration
 from monasca_agent.common.exceptions import PathNotFound
@@ -24,6 +24,8 @@ import signal
 import socket
 import sys
 
+from six import text_type
+
 # set up logging before importing any other components
 import monasca_agent.common.util as util
 util.initialize_logging('forwarder')
@@ -43,6 +45,7 @@ import tornado.web
 import monasca_agent.common.config as cfg
 import monasca_agent.forwarder.api.monasca_api as mon
 
+
 log = logging.getLogger('forwarder')
 
 # Max amount of iterations to wait to meet min batch size before flushing
@@ -219,10 +222,10 @@ def main():
     skip_ssl_validation = False
     use_simple_http_client = False
 
-    if unicode(tornado.options.options.sslcheck) == u"0":
+    if text_type(tornado.options.options.sslcheck) == u"0":
         skip_ssl_validation = True
 
-    if unicode(tornado.options.options.use_simple_http_client) == u"1":
+    if text_type(tornado.options.options.use_simple_http_client) == u"1":
         use_simple_http_client = True
 
     # If we don't have any arguments, run the server.
tox.ini (5 additions)
@@ -37,6 +37,7 @@ commands =
   coverage report
 
 [testenv:bandit]
+basepython = python3
 commands =
   # B101 - asserts used on purpose
   # Following rules should be fixed in future
@@ -60,17 +61,20 @@ commands =
   bandit -r monasca_agent -n5 -s B101,B602,B603,B301,B303,B311,B403,B404,B405,B310,B320,B410,B411,B501,B504,B605,B607,B608 -x {toxinidir}/tests
 
 [testenv:flake8]
+basepython = python3
 commands =
   flake8 monasca_agent
   flake8 monasca_setup
   flake8 tests
 
 [testenv:pep8]
+basepython = python3
 commands =
   {[testenv:flake8]commands}
   {[testenv:bandit]commands}
 
 [testenv:venv]
+basepython = python3
 commands = {posargs}
 
 [testenv:bindep]
@@ -78,6 +82,7 @@ commands = {posargs}
 # system dependencies are missing, since it's used to tell you what system
 # dependencies are missing! This also means that bindep must be installed
 # separately, outside of the requirements files.
+basepython = python3
 deps = bindep
 commands = bindep test
 