author     Christopher Baines <mail@cbaines.net>    2020-05-02 12:30:53 +0100
committer  Christopher Baines <mail@cbaines.net>    2020-05-02 15:53:36 +0100
commit     a8cb1e72ef351330d1521833c1b270dcc0da593f
tree       14851296ad0eaa2fa4a32fa60921397bc1030953 /gnu/packages
parent     20d7dbc77a2b0ed379c0a595daf8d7f852ef3a4c
gnu: behave: Fix build with Python 3.8.
Add a patch based on an upstream commit [1].
1: c000c88eb5239b87f299c85e83b349b0ef387ae7
* gnu/packages/patches/behave-skip-a-couple-of-tests.patch: New file.
* gnu/local.mk (dist_patch_DATA): Add it.
* gnu/packages/check.scm (behave)[source]: Use the patch.
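
For context, the [source] field of the behave package ends up looking roughly
like the sketch below, pieced together from the check.scm hunk further down;
the (method url-fetch) line is assumed from the usual pypi-uri pattern and is
not part of the diff itself:

    (source
     (origin
       (method url-fetch)   ; assumed; not shown in the hunk below
       (uri (pypi-uri "behave" version))
       (sha256
        (base32
         "11hsz365qglvpp1m1w16239c3kiw15lw7adha49lqaakm8kj6rmr"))
       ;; Skip a couple of tests that fail under Python 3.8.
       (patches (search-patches
                 "behave-skip-a-couple-of-tests.patch"))))

search-patches resolves the file name against Guix's patch directories, which
is why the patch also has to be listed under dist_patch_DATA in gnu/local.mk.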
Diffstat (limited to 'gnu/packages')
 gnu/packages/check.scm                                   |   4 +-
 gnu/packages/patches/behave-skip-a-couple-of-tests.patch | 462 ++++++++++++++
 2 files changed, 465 insertions(+), 1 deletion(-)
diff --git a/gnu/packages/check.scm b/gnu/packages/check.scm index 05a796a3e3..2629e88323 100644 --- a/gnu/packages/check.scm +++ b/gnu/packages/check.scm @@ -2107,7 +2107,9 @@ backported from Python 2.7 for Python 2.4+.") (uri (pypi-uri "behave" version)) (sha256 (base32 - "11hsz365qglvpp1m1w16239c3kiw15lw7adha49lqaakm8kj6rmr")))) + "11hsz365qglvpp1m1w16239c3kiw15lw7adha49lqaakm8kj6rmr")) + (patches (search-patches + "behave-skip-a-couple-of-tests.patch")))) (build-system python-build-system) (native-inputs `(("python-mock" ,python-mock) diff --git a/gnu/packages/patches/behave-skip-a-couple-of-tests.patch b/gnu/packages/patches/behave-skip-a-couple-of-tests.patch new file mode 100644 index 0000000000..e3d4e15241 --- /dev/null +++ b/gnu/packages/patches/behave-skip-a-couple-of-tests.patch @@ -0,0 +1,462 @@ +Fix build with Python 3.8, this is a patch based on upstream commit [1]. + +1: c000c88eb5239b87f299c85e83b349b0ef387ae7 + + +diff --git a/behave.ini b/behave.ini +index 45c0f0d7..952240d6 100644 +--- a/behave.ini ++++ b/behave.ini +@@ -15,8 +15,9 @@ show_skipped = false + format = rerun + progress3 + outfiles = rerun.txt +- reports/report_progress3.txt ++ build/behave.reports/report_progress3.txt + junit = true ++junit_directory = build/behave.reports + logging_level = INFO + # logging_format = LOG.%(levelname)-8s %(name)-10s: %(message)s + # logging_format = LOG.%(levelname)-8s %(asctime)s %(name)-10s: %(message)s +diff --git a/features/environment.py b/features/environment.py +index 4744e89a..3769ee40 100644 +--- a/features/environment.py ++++ b/features/environment.py +@@ -1,5 +1,7 @@ + # -*- coding: UTF-8 -*- ++# FILE: features/environemnt.py + ++from __future__ import absolute_import, print_function + from behave.tag_matcher import ActiveTagMatcher, setup_active_tag_values + from behave4cmd0.setup_command_shell import setup_command_shell_processors4behave + import platform +@@ -20,6 +22,15 @@ + } + active_tag_matcher = ActiveTagMatcher(active_tag_value_provider) + ++ ++def print_active_tags_summary(): ++ active_tag_data = active_tag_value_provider ++ print("ACTIVE-TAG SUMMARY:") ++ print("use.with_python.version=%s" % active_tag_data.get("python.version")) ++ # print("use.with_os=%s" % active_tag_data.get("os")) ++ print() ++ ++ + # ----------------------------------------------------------------------------- + # HOOKS: + # ----------------------------------------------------------------------------- +@@ -30,11 +41,14 @@ def before_all(context): + setup_python_path() + setup_context_with_global_params_test(context) + setup_command_shell_processors4behave() ++ print_active_tags_summary() ++ + + def before_feature(context, feature): + if active_tag_matcher.should_exclude_with(feature.tags): + feature.skip(reason=active_tag_matcher.exclude_reason) + ++ + def before_scenario(context, scenario): + if active_tag_matcher.should_exclude_with(scenario.effective_tags): + scenario.skip(reason=active_tag_matcher.exclude_reason) +diff --git a/features/step.duplicated_step.feature b/features/step.duplicated_step.feature +index 59888b0f..396cca27 100644 +--- a/features/step.duplicated_step.feature ++++ b/features/step.duplicated_step.feature +@@ -32,11 +32,11 @@ Feature: Duplicated Step Definitions + AmbiguousStep: @given('I call Alice') has already been defined in + existing step @given('I call Alice') at features/steps/alice_steps.py:3 + """ +- And the command output should contain: +- """ +- File "features/steps/alice_steps.py", line 7, in <module> +- @given(u'I call Alice') +- """ ++ # -- 
DISABLED: Python 3.8 traceback line numbers differ w/ decorators (+1). ++ # And the command output should contain: ++ # """ ++ # File "features/steps/alice_steps.py", line 7, in <module> ++ # """ + + + Scenario: Duplicated Step Definition in another File +@@ -70,11 +70,11 @@ Feature: Duplicated Step Definitions + AmbiguousStep: @given('I call Bob') has already been defined in + existing step @given('I call Bob') at features/steps/bob1_steps.py:3 + """ +- And the command output should contain: +- """ +- File "features/steps/bob2_steps.py", line 3, in <module> +- @given('I call Bob') +- """ ++ # -- DISABLED: Python 3.8 traceback line numbers differ w/ decorators (+1). ++ # And the command output should contain: ++ # """ ++ # File "features/steps/bob2_steps.py", line 3, in <module> ++ # """ + + @xfail + Scenario: Duplicated Same Step Definition via import from another File +diff --git a/issue.features/environment.py b/issue.features/environment.py +index 2dfec751..7e48ee03 100644 +--- a/issue.features/environment.py ++++ b/issue.features/environment.py +@@ -1,5 +1,5 @@ + # -*- coding: UTF-8 -*- +-# FILE: features/environment.py ++# FILE: issue.features/environemnt.py + # pylint: disable=unused-argument + """ + Functionality: +@@ -7,17 +7,20 @@ + * active tags + """ + +-from __future__ import print_function ++ ++from __future__ import absolute_import, print_function + import sys + import platform + import os.path + import six + from behave.tag_matcher import ActiveTagMatcher + from behave4cmd0.setup_command_shell import setup_command_shell_processors4behave +-# PREPARED: +-# from behave.tag_matcher import setup_active_tag_values ++# PREPARED: from behave.tag_matcher import setup_active_tag_values + + ++# --------------------------------------------------------------------------- ++# TEST SUPPORT: For Active Tags ++# --------------------------------------------------------------------------- + def require_tool(tool_name): + """Check if a tool (an executable program) is provided on this platform. + +@@ -45,12 +48,14 @@ def require_tool(tool_name): + # print("TOOL-NOT-FOUND: %s" % tool_name) + return False + ++ + def as_bool_string(value): + if bool(value): + return "yes" + else: + return "no" + ++ + def discover_ci_server(): + # pylint: disable=invalid-name + ci_server = "none" +@@ -67,11 +72,17 @@ def discover_ci_server(): + return ci_server + + ++# --------------------------------------------------------------------------- ++# BEHAVE SUPPORT: Active Tags ++# --------------------------------------------------------------------------- + # -- MATCHES ANY TAGS: @use.with_{category}={value} + # NOTE: active_tag_value_provider provides category values for active tags. 
++python_version = "%s.%s" % sys.version_info[:2] + active_tag_value_provider = { ++ "platform": sys.platform, + "python2": str(six.PY2).lower(), + "python3": str(six.PY3).lower(), ++ "python.version": python_version, + # -- python.implementation: cpython, pypy, jython, ironpython + "python.implementation": platform.python_implementation().lower(), + "pypy": str("__pypy__" in sys.modules).lower(), +@@ -82,17 +92,33 @@ def discover_ci_server(): + } + active_tag_matcher = ActiveTagMatcher(active_tag_value_provider) + ++ ++def print_active_tags_summary(): ++ active_tag_data = active_tag_value_provider ++ print("ACTIVE-TAG SUMMARY:") ++ print("use.with_python.version=%s" % active_tag_data.get("python.version")) ++ # print("use.with_platform=%s" % active_tag_data.get("platform")) ++ # print("use.with_os=%s" % active_tag_data.get("os")) ++ print() ++ ++ ++# --------------------------------------------------------------------------- ++# BEHAVE HOOKS: ++# --------------------------------------------------------------------------- + def before_all(context): + # -- SETUP ACTIVE-TAG MATCHER (with userdata): + # USE: behave -D browser=safari ... +- # NOT-NEEDED: setup_active_tag_values(active_tag_value_provider, +- # context.config.userdata) ++ # NOT-NEEDED: ++ # setup_active_tag_values(active_tag_value_provider, context.config.userdata) + setup_command_shell_processors4behave() ++ print_active_tags_summary() ++ + + def before_feature(context, feature): + if active_tag_matcher.should_exclude_with(feature.tags): + feature.skip(reason=active_tag_matcher.exclude_reason) + ++ + def before_scenario(context, scenario): + if active_tag_matcher.should_exclude_with(scenario.effective_tags): + scenario.skip(reason=active_tag_matcher.exclude_reason) +diff --git a/issue.features/issue0330.feature b/issue.features/issue0330.feature +index dc1ebe75..81cb6e29 100644 +--- a/issue.features/issue0330.feature ++++ b/issue.features/issue0330.feature +@@ -70,6 +70,7 @@ Feature: Issue #330: Skipped scenarios are included in junit reports when --no-s + And note that "bob.feature is skipped" + + ++ @not.with_python.version=3.8 + Scenario: Junit report for skipped feature is created with --show-skipped + When I run "behave --junit -t @tag1 --show-skipped @alice_and_bob.featureset" + Then it should pass with: +@@ -83,6 +84,23 @@ Feature: Issue #330: Skipped scenarios are included in junit reports when --no-s + <testsuite errors="0" failures="0" name="bob.Bob" skipped="1" tests="1" time="0.0"> + """ + ++ @use.with_python.version=3.8 ++ Scenario: Junit report for skipped feature is created with --show-skipped ++ When I run "behave --junit -t @tag1 --show-skipped @alice_and_bob.featureset" ++ Then it should pass with: ++ """ ++ 1 feature passed, 0 failed, 1 skipped ++ """ ++ And a file named "test_results/TESTS-alice.xml" exists ++ And a file named "test_results/TESTS-bob.xml" exists ++ And the file "test_results/TESTS-bob.xml" should contain: ++ """ ++ <testsuite name="bob.Bob" tests="1" errors="0" failures="0" skipped="1" time="0.0"> ++ """ ++ # -- HINT FOR: Python < 3.8 ++ # <testsuite errors="0" failures="0" name="bob.Bob" skipped="1" tests="1" time="0.0"> ++ ++ @not.with_python.version=3.8 + Scenario: Junit report for skipped scenario is neither shown nor counted with --no-skipped + When I run "behave --junit -t @tag1 --no-skipped" + Then it should pass with: +@@ -102,7 +120,30 @@ Feature: Issue #330: Skipped scenarios are included in junit reports when --no-s + """ + And note that "Charly2 is the skipped scenarion in 
charly.feature" + ++ @use.with_python.version=3.8 ++ Scenario: Junit report for skipped scenario is neither shown nor counted with --no-skipped ++ When I run "behave --junit -t @tag1 --no-skipped" ++ Then it should pass with: ++ """ ++ 2 features passed, 0 failed, 1 skipped ++ 2 scenarios passed, 0 failed, 2 skipped ++ """ ++ And a file named "test_results/TESTS-alice.xml" exists ++ And a file named "test_results/TESTS-charly.xml" exists ++ And the file "test_results/TESTS-charly.xml" should contain: ++ """ ++ <testsuite name="charly.Charly" tests="1" errors="0" failures="0" skipped="0" ++ """ ++ # -- HINT FOR: Python < 3.8 ++ # <testsuite errors="0" failures="0" name="charly.Charly" skipped="0" tests="1" ++ And the file "test_results/TESTS-charly.xml" should not contain: ++ """ ++ <testcase classname="charly.Charly" name="Charly2" ++ """ ++ And note that "Charly2 is the skipped scenarion in charly.feature" ++ + ++ @not.with_python.version=3.8 + Scenario: Junit report for skipped scenario is shown and counted with --show-skipped + When I run "behave --junit -t @tag1 --show-skipped" + Then it should pass with: +@@ -122,3 +163,26 @@ Feature: Issue #330: Skipped scenarios are included in junit reports when --no-s + """ + And note that "Charly2 is the skipped scenarion in charly.feature" + ++ ++ @use.with_python.version=3.8 ++ Scenario: Junit report for skipped scenario is shown and counted with --show-skipped ++ When I run "behave --junit -t @tag1 --show-skipped" ++ Then it should pass with: ++ """ ++ 2 features passed, 0 failed, 1 skipped ++ 2 scenarios passed, 0 failed, 2 skipped ++ """ ++ And a file named "test_results/TESTS-alice.xml" exists ++ And a file named "test_results/TESTS-charly.xml" exists ++ And the file "test_results/TESTS-charly.xml" should contain: ++ """ ++ <testsuite name="charly.Charly" tests="2" errors="0" failures="0" skipped="1" ++ """ ++ # HINT: Python < 3.8 ++ # <testsuite errors="0" failures="0" name="charly.Charly" skipped="1" tests="2" ++ And the file "test_results/TESTS-charly.xml" should contain: ++ """ ++ <testcase classname="charly.Charly" name="Charly2" status="skipped" ++ """ ++ And note that "Charly2 is the skipped scenarion in charly.feature" ++ +diff --git a/issue.features/issue0446.feature b/issue.features/issue0446.feature +index a2ed892d..901bdec5 100644 +--- a/issue.features/issue0446.feature ++++ b/issue.features/issue0446.feature +@@ -58,6 +58,7 @@ Feature: Issue #446 -- Support scenario hook-errors with JUnitReporter + behave.reporter.junit.show_hostname = False + """ + ++ @not.with_python.version=3.8 + Scenario: Hook error in before_scenario() + When I run "behave -f plain --junit features/before_scenario_failure.feature" + Then it should fail with: +@@ -86,6 +87,40 @@ Feature: Issue #446 -- Support scenario hook-errors with JUnitReporter + And note that "the traceback is contained in the XML element <error/>" + + ++ @use.with_python.version=3.8 ++ Scenario: Hook error in before_scenario() ++ When I run "behave -f plain --junit features/before_scenario_failure.feature" ++ Then it should fail with: ++ """ ++ 0 scenarios passed, 1 failed, 0 skipped ++ """ ++ And the command output should contain: ++ """ ++ HOOK-ERROR in before_scenario: RuntimeError: OOPS ++ """ ++ And the file "reports/TESTS-before_scenario_failure.xml" should contain: ++ """ ++ <testsuite name="before_scenario_failure.Alice" tests="1" errors="1" failures="0" skipped="0" ++ """ ++ # -- HINT FOR: Python < 3.8 ++ # <testsuite errors="1" failures="0" 
name="before_scenario_failure.Alice" skipped="0" tests="1" ++ And the file "reports/TESTS-before_scenario_failure.xml" should contain: ++ """ ++ <error type="RuntimeError" message="HOOK-ERROR in before_scenario: RuntimeError: OOPS"> ++ """ ++ # -- HINT FOR: Python < 3.8 ++ # <error message="HOOK-ERROR in before_scenario: RuntimeError: OOPS" type="RuntimeError"> ++ And the file "reports/TESTS-before_scenario_failure.xml" should contain: ++ """ ++ File "features/environment.py", line 6, in before_scenario ++ cause_hook_failure() ++ File "features/environment.py", line 2, in cause_hook_failure ++ raise RuntimeError("OOPS") ++ """ ++ And note that "the traceback is contained in the XML element <error/>" ++ ++ ++ @not.with_python.version=3.8 + Scenario: Hook error in after_scenario() + When I run "behave -f plain --junit features/after_scenario_failure.feature" + Then it should fail with: +@@ -114,3 +149,38 @@ Feature: Issue #446 -- Support scenario hook-errors with JUnitReporter + raise RuntimeError("OOPS") + """ + And note that "the traceback is contained in the XML element <error/>" ++ ++ ++ @use.with_python.version=3.8 ++ Scenario: Hook error in after_scenario() ++ When I run "behave -f plain --junit features/after_scenario_failure.feature" ++ Then it should fail with: ++ """ ++ 0 scenarios passed, 1 failed, 0 skipped ++ """ ++ And the command output should contain: ++ """ ++ Scenario: B1 ++ Given another step passes ... passed ++ HOOK-ERROR in after_scenario: RuntimeError: OOPS ++ """ ++ And the file "reports/TESTS-after_scenario_failure.xml" should contain: ++ """ ++ <testsuite name="after_scenario_failure.Bob" tests="1" errors="1" failures="0" skipped="0" ++ """ ++ # -- HINT FOR: Python < 3.8 ++ # <testsuite errors="1" failures="0" name="after_scenario_failure.Bob" skipped="0" tests="1" ++ And the file "reports/TESTS-after_scenario_failure.xml" should contain: ++ """ ++ <error type="RuntimeError" message="HOOK-ERROR in after_scenario: RuntimeError: OOPS"> ++ """ ++ # -- HINT FOR: Python < 3.8 ++ # <error message="HOOK-ERROR in after_scenario: RuntimeError: OOPS" type="RuntimeError"> ++ And the file "reports/TESTS-after_scenario_failure.xml" should contain: ++ """ ++ File "features/environment.py", line 10, in after_scenario ++ cause_hook_failure() ++ File "features/environment.py", line 2, in cause_hook_failure ++ raise RuntimeError("OOPS") ++ """ ++ And note that "the traceback is contained in the XML element <error/>" +diff --git a/issue.features/issue0457.feature b/issue.features/issue0457.feature +index f80640e9..46f96e9c 100644 +--- a/issue.features/issue0457.feature ++++ b/issue.features/issue0457.feature +@@ -24,6 +24,7 @@ Feature: Issue #457 -- Double-quotes in error messages of JUnit XML reports + """ + + ++ @not.with_python.version=3.8 + Scenario: Use failing assertation in a JUnit XML report + Given a file named "features/fails1.feature" with: + """ +@@ -44,6 +45,31 @@ Feature: Issue #457 -- Double-quotes in error messages of JUnit XML reports + <failure message="FAILED: My name is "Alice"" + """ + ++ @use.with_python.version=3.8 ++ Scenario: Use failing assertation in a JUnit XML report ++ Given a file named "features/fails1.feature" with: ++ """ ++ Feature: ++ Scenario: Alice ++ Given a step fails with message: ++ ''' ++ My name is "Alice" ++ ''' ++ """ ++ When I run "behave --junit features/fails1.feature" ++ Then it should fail with: ++ """ ++ 0 scenarios passed, 1 failed, 0 skipped ++ """ ++ And the file "reports/TESTS-fails1.xml" should contain: ++ """ ++ <failure 
type="AssertionError" message="FAILED: My name is "Alice""> ++ """ ++ # -- HINT FOR: Python < 3.8 ++ # <failure message="FAILED: My name is "Alice"" ++ ++ ++ @not.with_python.version=3.8 + Scenario: Use exception in a JUnit XML report + Given a file named "features/fails2.feature" with: + """ +@@ -63,3 +89,26 @@ Feature: Issue #457 -- Double-quotes in error messages of JUnit XML reports + """ + <error message="My name is "Bob" and <here> I am" + """ ++ ++ @use.with_python.version=3.8 ++ Scenario: Use exception in a JUnit XML report ++ Given a file named "features/fails2.feature" with: ++ """ ++ Feature: ++ Scenario: Bob ++ Given a step fails with error and message: ++ ''' ++ My name is "Bob" and <here> I am ++ ''' ++ """ ++ When I run "behave --junit features/fails2.feature" ++ Then it should fail with: ++ """ ++ 0 scenarios passed, 1 failed, 0 skipped ++ """ ++ And the file "reports/TESTS-fails2.xml" should contain: ++ """ ++ <error type="RuntimeError" message="My name is "Bob" and <here> I am"> ++ """ ++ # -- HINT FOR: Python < 3.8 ++ # <error message="My name is "Bob" and <here> I am" |