Add a way to test multipip against many samples

Add a test that reads from a test data YAML file to run
many multipip version checks at once, making it easy to
add new tests (and delete old ones). A standalone sketch
of this flow appears below, after the commit metadata.

- Move all current sample tests to this new format.
- Fix a bug that allowed variations such as x<1, x<4 to
  be accepted as a good combination.

Change-Id: I66b806c6d1d804c0de4957f95dda93b0390f94c0
commit d3e2377269 (parent cc1c99c791)
Author: Joshua Harlow
Date: 2014-03-28 08:19:31 -07:00

4 changed files with 91 additions and 49 deletions
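
The data-driven flow is simple: load each requirements*.yaml file, and
for every entry feed its requirements list to multipip and compare the
merged output against expected. A minimal standalone sketch of that
flow (illustrative only: the exact YAML file name and the helper are
assumptions; the committed test code is in the first diff below):

# Illustrative sketch, not the committed test code. Assumes PyYAML is
# installed and that tools/multipip exists relative to the working dir.
import subprocess
import sys

import yaml


def check_examples(path):
    with open(path) as fh:
        examples = yaml.safe_load(fh)
    for i, example in enumerate(examples):
        proc = subprocess.Popen(
            [sys.executable, "tools/multipip"] + example["requirements"],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, _stderr = proc.communicate()
        got = stdout.decode("utf8").strip()
        want = example["expected"]
        print("%s[%s]: %s" % (path, i, "ok" if got == want else "FAIL"))


if __name__ == "__main__":
    # Hypothetical file name matching the requirements*.yaml glob.
    check_examples("data/tests/requirements.yaml")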


@@ -14,17 +14,43 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+import glob
+import re
+import sys
+
 from anvil import shell as sh
 from anvil import test
+from anvil import utils
+
+from nose_parameterized import parameterized
+
+EXAMPLE_GLOB = sh.joinpths("data", "tests", "requirements*.yaml")
+
+
+def load_examples():
+    examples = []
+    for filename in glob.glob(EXAMPLE_GLOB):
+        if sh.isfile(filename):
+            # The test generator will use the first element as the test
+            # identifier, so provide a filename + index based identifier to
+            # be able to connect test failures to the example that caused it.
+            try:
+                base = sh.basename(filename)
+                base = re.sub(r"[.\s]", "_", base)
+                for i, example in enumerate(utils.load_yaml(filename)):
+                    examples.append(("%s_%s" % (base, i), example))
+            except IOError:
+                pass
+    return examples
+
+
 class TestTools(test.TestCase):
     def setUp(self):
         super(TestTools, self).setUp()
-        self.multipip = sh.which("multipip", ['tools'])
+        self.multipip = [sys.executable, sh.which("multipip", ['tools'])]
 
     def _run_multipip(self, versions):
-        cmd = [self.multipip]
+        cmd = list(self.multipip)
         cmd.extend(versions)
         return sh.execute(cmd, check_exit_code=False)
@@ -56,47 +82,11 @@ class TestTools(test.TestCase):
                 pass
         return conflicts
 
-    def test_multipip_ok(self):
-        versions = [
-            "x>1",
-            "x>2",
-        ]
-        (stdout, stderr) = self._run_multipip(versions)
+    @parameterized.expand(load_examples())
+    def test_example(self, _name, example):
+        (stdout, stderr) = self._run_multipip(example['requirements'])
         stdout = stdout.strip()
-        self.assertEqual("x>1,>2", stdout)
-        self.assertEqual({}, self._extract_conflicts(stderr))
-
-    def test_multipip_varied(self):
-        versions = [
-            'x!=2',
-            'x!=3',
-            "y>3",
-        ]
-        (stdout, stderr) = self._run_multipip(versions)
-        stdout = stdout.strip()
-        self.assertEqual({}, self._extract_conflicts(stderr))
-        self.assertEqual("x!=2,!=3\ny>3", stdout)
-
-    def test_multipip_best_pick(self):
-        versions = [
-            "x>1",
-            "x>=2",
-            "x!=2",
-        ]
-        (stdout, stderr) = self._run_multipip(versions)
-        stdout = stdout.strip()
-        self.assertEqual('x>1,!=2', stdout)
-        self.assertEqual(["x>=2"], self._extract_conflicts(stderr)['x'])
-
-    def test_multipip_best_pick_again(self):
-        versions = [
-            "x>1",
-            "x>=2",
-            "x!=2",
-            'x>4',
-            'x>5',
-        ]
-        (stdout, stderr) = self._run_multipip(versions)
-        stdout = stdout.strip()
-        self.assertEqual('x>1,!=2,>4,>5', stdout)
-        self.assertEqual(["x>=2"], self._extract_conflicts(stderr)['x'])
+        self.assertEqual(example['expected'], stdout)
+        if 'conflicts' in example:
+            self.assertEqual(example['conflicts'],
+                             self._extract_conflicts(stderr))
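
A note on the mechanism: parameterized.expand() generates one test
method per (name, example) tuple, folding the first element into the
generated test name, which is why load_examples() builds a filename +
index identifier. A minimal self-contained sketch of the pattern
(hypothetical data, not from this commit):

# Minimal sketch of the nose_parameterized pattern used above.
import unittest

from nose_parameterized import parameterized


class TestPattern(unittest.TestCase):
    @parameterized.expand([
        ("requirements_yaml_0", {"requirements": ["x>1"], "expected": "x>1"}),
        ("requirements_yaml_1", {"requirements": ["y>3"], "expected": "y>3"}),
    ])
    def test_example(self, _name, example):
        # Each tuple becomes its own test, so a failure points at one
        # specific example instead of one big test failing somewhere.
        self.assertTrue(example["expected"])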


@@ -0,0 +1,43 @@
+---
+- expected: a>1
+  requirements:
+    - a>1
+    - a>2
+- expected: a<0.5
+  requirements:
+    # Both are mutually incompatible; due to sorting the lower one will be selected first.
+    - a>1
+    - a<0.5
+  conflicts:
+    a: ["a>1"]
+- expected: a>1
+  requirements:
+    # More requests for >1 should then select >1
+    - a>1
+    - a>1
+    - a<0.5
+  conflicts:
+    a: ["a<0.5"]
+- expected: "x!=2,!=3\ny>3"
+  requirements:
+    - "x!=2"
+    - "x!=3"
+    - "y>3"
+- expected: "x>1"
+  requirements:
+    - x>1
+    - x>2
+- expected: 'x>1,!=2'
+  requirements:
+    - x>1
+    - x>=2
+    - x!=2
+- expected: "x>1,!=2"
+  requirements:
+    - x>1
+    - x>=2
+    - x!=2
+    - x>4
+    - x>5
+...
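
Adding coverage is now just a matter of appending another entry. For
example (a hypothetical case, not part of the committed file; the
expected value follows the lower-pick behavior shown in the first
entry above):

- expected: z>=1.0
  requirements:
    - z>=1.0
    - z>=1.2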


@@ -3,3 +3,4 @@ hacking>=0.8.0,<0.9
 mock>=1.0
 nose
 testtools>=0.9.34
+nose-parameterized


@@ -141,7 +141,11 @@ def conflict_scorer(versioned):
                 continue
             if op in ["<", "<="] and version2 <= version:
                 score += 1
-            if op in ["==", ">="] and version2 == version:
+            elif op in ["==", ">="] and version2 == version:
+                score += 1
+            elif op == ">" and version2 < version:
+                score += 1
+            elif op == ">=" and version2 <= version:
                 score += 1
     for version in sorted(op_versions.get(">=", [])):
         for (op, version2) in versioned:
@@ -149,7 +153,7 @@ def conflict_scorer(versioned):
                 continue
             if op in ["<", "<="] and version2 < version:
                 score += 1
-            if op in [">=", ">"] and version2 < version:
+            elif op in [">=", ">"] and version2 < version:
                 score += 1
     for version in sorted(op_versions.get("<", [])):
         for (op, version2) in versioned:
@@ -157,7 +161,11 @@ def conflict_scorer(versioned):
                 continue
             if op in [">", ">="] and version2 >= version:
                 score += 1
-            if op in ["==", "<="] and version2 == version:
+            elif op in ["==", "<="] and version2 == version:
+                score += 1
+            elif op == "<" and version2 > version:
+                score += 1
+            elif op == "<=" and version2 >= version:
                 score += 1
     for version in sorted(op_versions.get("<=", [])):
         for (op, version2) in versioned:
@@ -165,7 +173,7 @@ def conflict_scorer(versioned):
                 continue
             if op in [">", ">="] and version2 > version:
                 score += 1
-            if op in ["<=", "<"] and version2 > version:
+            elif op in ["<=", "<"] and version2 > version:
                 score += 1
     return score
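
Why these elif branches fix the x<1, x<4 case: previously the "<" loop
only scored lower bounds (>, >=) and same-version pins (==, <=)
against a "<" candidate, so two overlapping upper bounds never scored
against each other and both survived as a good combination. A
simplified standalone sketch of that loop after the fix (illustrative
only; the real code compares parsed version objects, not floats):

# Simplified sketch of the "<" scoring loop after the fix (not the
# actual multipip code; floats stand in for parsed versions).
def score_less_than(version, others):
    score = 0
    for (op, version2) in others:
        if op in [">", ">="] and version2 >= version:
            score += 1
        elif op in ["==", "<="] and version2 == version:
            score += 1
        elif op == "<" and version2 > version:
            # New branch: overlapping upper bounds now register, so one
            # of x<1, x<4 can be singled out instead of keeping both.
            score += 1
        elif op == "<=" and version2 >= version:
            score += 1
    return score


print(score_less_than(1.0, [("<", 4.0)]))  # -> 1 (was 0 before the fix)
print(score_less_than(4.0, [("<", 1.0)]))  # -> 0

The requirement that scores higher is the one flagged as conflicting;
per the comment in the YAML examples above, ties appear to be broken
by sorting.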