[packages/python-pytest-benchmark] - initial, tests not run for now
qboosh
qboosh@pld-linux.org
Sun Dec 9 12:28:59 CET 2018
commit 911860a85cbac78fb638105c881fc59ac2ddc44b
Author: Jakub Bogusz <qboosh@pld-linux.org>
Date: Sun Dec 9 12:33:20 2018 +0100
- initial, tests not run for now
python-pytest-benchmark-capture.patch | 30 +++++
python-pytest-benchmark-tests.patch | 229 ++++++++++++++++++++++++++++++++++
python-pytest-benchmark.spec | 198 +++++++++++++++++++++++++++++
3 files changed, 457 insertions(+)
---
diff --git a/python-pytest-benchmark.spec b/python-pytest-benchmark.spec
new file mode 100644
index 0000000..f076aec
--- /dev/null
+++ b/python-pytest-benchmark.spec
@@ -0,0 +1,198 @@
+#
+# Conditional build:
+%bcond_without doc # Sphinx documentation
+%bcond_with tests # unit tests [very sensitive to pytest output]
+%bcond_without python2 # CPython 2.x module
+%bcond_without python3 # CPython 3.x module
+
+Summary: py.test fixture for benchmarking code
+Summary(pl.UTF-8): Wyposażenie py.testa do testowania wydajności kodu
+Name: python-pytest-benchmark
+Version: 3.1.1
+Release: 1
+License: BSD
+Group: Libraries/Python
+#Source0Download: https://pypi.org/simple/pytest-benchmark/
+Source0: https://files.pythonhosted.org/packages/source/p/pytest-benchmark/pytest-benchmark-%{version}.tar.gz
+# Source0-md5: dce60d8b2a63389cf8619acce8297186
+Patch0: %{name}-capture.patch
+Patch1: %{name}-tests.patch
+URL: https://github.com/ionelmc/pytest-benchmark
+%if %{with python2}
+BuildRequires: python-modules >= 1:2.6
+BuildRequires: python-setuptools
+%if %{with tests}
+#BuildRequires: python-aspectlib >= 1.4.2
+# for storage tests
+#BuildRequires: python-elasticsearch >= 5.3.0
+BuildRequires: python-freezegun >= 0.3.8
+BuildRequires: python-hunter >= 1.4.1
+BuildRequires: python-pathlib >= 1.0.1
+BuildRequires: python-py-cpuinfo
+# for histogram tests
+#BuildRequires: python-pygal >= 2.2.1
+#BuildRequires: python-pygaljs >= 1.0.1
+BuildRequires: python-pytest >= 2.8
+BuildRequires: python-statistics >= 1.0.3.5
+%endif
+%endif
+%if %{with python3}
+BuildRequires: python3-modules >= 1:3.4
+BuildRequires: python3-setuptools
+%if %{with tests}
+#BuildRequires: python3-aspectlib >= 1.4.2
+# for storage tests
+#BuildRequires: python3-elasticsearch >= 5.3.0
+BuildRequires: python3-freezegun >= 0.3.8
+BuildRequires: python3-hunter >= 1.4.1
+BuildRequires: python3-py-cpuinfo
+# for histogram tests
+#BuildRequires: python3-pygal >= 2.2.3
+#BuildRequires: python3-pygaljs >= 1.0.1
+BuildRequires: python3-pytest >= 2.8
+%if "%{py3_ver}" < "3.3"
+BuildRequires: python3-mock >= 2.0.0
+%endif
+%if "%{py3_ver}" < "3.4"
+BuildRequires: python3-pathlib >= 1.0.1
+BuildRequires: python3-statistics >= 1.0.3.5
+%endif
+%endif
+%endif
+BuildRequires: rpm-pythonprov
+BuildRequires: rpmbuild(macros) >= 1.714
+%if %{with doc}
+BuildRequires: python3-sphinx_py3doc_enhanced_theme
+BuildRequires: sphinx-pdg-3
+%endif
+Requires: python-modules >= 1:2.6
+BuildArch: noarch
+BuildRoot: %{tmpdir}/%{name}-%{version}-root-%(id -u -n)
+
+%description
+A py.test fixture for benchmarking code. It will group the tests into
+rounds that are calibrated to the chosen timer.
+
+%description -l pl.UTF-8
+Wyposażenie (fixture) modułu py.test do testowania wydajności kodu.
+Grupuje testy w rundy, które są kalibrowane do wybranego stopera.
+
+%package -n python3-pytest-benchmark
+Summary: py.test fixture for benchmarking code
+Summary(pl.UTF-8): Wyposażenie py.testa do testowania wydajności kodu
+Group: Libraries/Python
+Requires: python3-modules >= 1:3.4
+
+%description -n python3-pytest-benchmark
+A py.test fixture for benchmarking code. It will group the tests into
+rounds that are calibrated to the chosen timer.
+
+%description -n python3-pytest-benchmark -l pl.UTF-8
+Wyposażenie (fixture) modułu py.test do testowania wydajności kodu.
+Grupuje testy w rundy, które są kalibrowane do wybranego stopera.
+
+%package apidocs
+Summary: API documentation for Python pytest_benchmark module
+Summary(pl.UTF-8): Dokumentacja API modułu Pythona pytest_benchmark
+Group: Documentation
+
+%description apidocs
+API documentation for Python pytest_benchmark module.
+
+%description apidocs -l pl.UTF-8
+Dokumentacja API modułu Pythona pytest_benchmark.
+
+%prep
+%setup -q -n pytest-benchmark-%{version}
+%patch0 -p1
+%patch1 -p1
+
+# (most of these test removals are temporary)
+# requires elasticsearch
+%{__rm} tests/test_elasticsearch_storage.py
+# no py.test-benchmark program before install
+%{__rm} tests/test_cli.py
+# requires pygal for histograms
+%{__rm} tests/test_storage.py
+# require aspectlib
+%{__rm} tests/test_with_testcase.py
+%{__rm} tests/test_with_weaver.py
+# a few tests here depend on git, one on elasticsearch
+%{__rm} tests/test_utils.py
+
+%build
+%if %{with python2}
+%py_build
+
+%if %{with tests}
+PYTHONPATH=$(pwd)/src \
+%{__python} -m pytest tests
+%endif
+%endif
+
+%if %{with python3}
+%py3_build
+
+%if %{with tests}
+PYTHONPATH=$(pwd)/src \
+%{__python3} -m pytest tests
+%endif
+%endif
+
+%if %{with doc}
+cd docs
+PYTHONPATH=$(pwd)/../src \
+sphinx-build-3 -b html . _build/html
+%endif
+
+%install
+rm -rf $RPM_BUILD_ROOT
+
+%if %{with python2}
+%py_install
+
+%py_postclean
+
+%{__mv} $RPM_BUILD_ROOT%{_bindir}/py.test-benchmark{,-2}
+%{__mv} $RPM_BUILD_ROOT%{_bindir}/pytest-benchmark{,-2}
+%endif
+
+%if %{with python3}
+%py3_install
+
+%{__mv} $RPM_BUILD_ROOT%{_bindir}/py.test-benchmark{,-3}
+%{__mv} $RPM_BUILD_ROOT%{_bindir}/pytest-benchmark{,-3}
+ln -s py.test-benchmark-3 $RPM_BUILD_ROOT%{_bindir}/py.test-benchmark
+ln -s pytest-benchmark-3 $RPM_BUILD_ROOT%{_bindir}/pytest-benchmark
+%endif
+
+%clean
+rm -rf $RPM_BUILD_ROOT
+
+%if %{with python2}
+%files
+%defattr(644,root,root,755)
+%doc AUTHORS.rst CHANGELOG.rst LICENSE README.rst
+%attr(755,root,root) %{_bindir}/py.test-benchmark-2
+%attr(755,root,root) %{_bindir}/pytest-benchmark-2
+%{py_sitescriptdir}/pytest_benchmark
+%{py_sitescriptdir}/pytest_benchmark-%{version}-py*.egg-info
+%endif
+
+%if %{with python3}
+%files -n python3-pytest-benchmark
+%defattr(644,root,root,755)
+%doc AUTHORS.rst CHANGELOG.rst LICENSE README.rst
+%attr(755,root,root) %{_bindir}/py.test-benchmark
+%attr(755,root,root) %{_bindir}/pytest-benchmark
+%attr(755,root,root) %{_bindir}/py.test-benchmark-3
+%attr(755,root,root) %{_bindir}/pytest-benchmark-3
+%{py3_sitescriptdir}/pytest_benchmark
+%{py3_sitescriptdir}/pytest_benchmark-%{version}-py*.egg-info
+%endif
+
+%if %{with doc}
+%files apidocs
+%defattr(644,root,root,755)
+%doc docs/_build/html/{_images,_modules,_static,*.html,*.js}
+%endif
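For reference, the fixture this spec packages is used roughly as in the sketch below (a minimal example; the fib/test_fib names are made up here). As the %description says, pytest-benchmark calls the benchmarked callable in rounds calibrated to the chosen timer and reports the statistics. Note that the test suite is disabled by default (%bcond_with tests) and can be re-enabled at build time with rpmbuild --with tests once the pytest output sensitivity is sorted out.

def fib(n):
    # deliberately naive recursion, just to have something worth timing
    return n if n < 2 else fib(n - 1) + fib(n - 2)

def test_fib(benchmark):
    # the benchmark fixture runs fib(10) repeatedly in calibrated rounds
    # and records min/max/mean/stddev for the report
    result = benchmark(fib, 10)
    assert result == 55
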
diff --git a/python-pytest-benchmark-capture.patch b/python-pytest-benchmark-capture.patch
new file mode 100644
index 0000000..7140172
--- /dev/null
+++ b/python-pytest-benchmark-capture.patch
@@ -0,0 +1,30 @@
+Adjust for recent pytest
+--- pytest-benchmark-3.1.1/src/pytest_benchmark/logger.py.orig 2017-07-26 13:48:12.000000000 +0200
++++ pytest-benchmark-3.1.1/src/pytest_benchmark/logger.py 2018-08-03 07:41:12.218113772 +0200
+@@ -24,14 +24,14 @@
+ def warn(self, code, text, warner=None, suspend=False, fslocation=None):
+ if self.verbose:
+ if suspend and self.capman:
+- self.capman.suspendcapture(in_=True)
++ self.capman.suspend_global_capture(in_=True)
+ self.term.line("")
+ self.term.sep("-", red=True, bold=True)
+ self.term.write(" WARNING: ", red=True, bold=True)
+ self.term.line(text, red=True)
+ self.term.sep("-", red=True, bold=True)
+ if suspend and self.capman:
+- self.capman.resumecapture()
++ self.capman.resume_global_capture()
+ if warner is None:
+ warner = self.pytest_warn
+ if fslocation and self.pytest_warn_has_fslocation:
+@@ -55,7 +55,7 @@
+ def debug(self, text, **kwargs):
+ if self.verbose:
+ if self.capman:
+- self.capman.suspendcapture(in_=True)
++ self.capman.suspend_global_capture(in_=True)
+ self.info(text, **kwargs)
+ if self.capman:
+- self.capman.resumecapture()
++ self.capman.resume_global_capture()
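The capture patch tracks a pytest API rename: CaptureManager.suspendcapture/resumecapture became suspend_global_capture/resume_global_capture in newer pytest (around 3.3), so the plugin's logger has to call the new names. If both pytest generations had to be supported at once, a getattr-based fallback along these lines would be an alternative; this is only a sketch, not what the patch above does:

def _suspend_capture(capman):
    # prefer the newer method name, fall back to the pre-3.3 one
    suspend = getattr(capman, "suspend_global_capture", None) or capman.suspendcapture
    suspend(in_=True)

def _resume_capture(capman):
    resume = getattr(capman, "resume_global_capture", None) or capman.resumecapture
    resume()
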
diff --git a/python-pytest-benchmark-tests.patch b/python-pytest-benchmark-tests.patch
new file mode 100644
index 0000000..a87120a
--- /dev/null
+++ b/python-pytest-benchmark-tests.patch
@@ -0,0 +1,229 @@
+--- pytest-benchmark-3.1.1/tests/test_benchmark.py.orig 2017-07-25 13:13:37.000000000 +0200
++++ pytest-benchmark-3.1.1/tests/test_benchmark.py 2018-08-03 08:22:04.858085767 +0200
+@@ -128,11 +128,11 @@
+ result.stdout.fnmatch_lines([
+ "*collected 5 items",
+ "*",
+- "test_groups.py::*test_groups PASSED",
+- "test_groups.py::test_fast PASSED",
+- "test_groups.py::test_slow PASSED",
+- "test_groups.py::test_slower PASSED",
+- "test_groups.py::test_xfast PASSED",
++ "test_groups.py::*test_groups PASSED*",
++ "test_groups.py::test_fast PASSED*",
++ "test_groups.py::test_slow PASSED*",
++ "test_groups.py::test_slower PASSED*",
++ "test_groups.py::test_xfast PASSED*",
+ "*",
+ "* benchmark: 2 tests *",
+ "*",
+@@ -460,7 +460,7 @@
+ result = testdir.runpytest('--doctest-modules', '--benchmark-max-time=0.000001', '--benchmark-min-rounds=1', test)
+ result.stdout.fnmatch_lines([
+ "*collected 3 items",
+- "test_max_time_min_rounds.py ...",
++ "test_max_time_min_rounds.py ...*",
+ "* benchmark: 2 tests *",
+ "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
+ "------*",
+@@ -476,7 +476,7 @@
+ result = testdir.runpytest('--doctest-modules', '--benchmark-max-time=0.000001', test)
+ result.stdout.fnmatch_lines([
+ "*collected 3 items",
+- "test_max_time.py ...",
++ "test_max_time.py ...*",
+ "* benchmark: 2 tests *",
+ "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
+ "------*",
+@@ -587,7 +587,7 @@
+ result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0002', '-rw',
+ test)
+ result.stdout.fnmatch_lines([
+- "WBENCHMARK-C1 * Can't compare. No benchmark files * '0002'.",
++ "* Can't compare. No benchmark files * '0002'.",
+ ])
+
+
+@@ -597,7 +597,7 @@
+ result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '--benchmark-compare=0002',
+ test, '--benchmark-verbose')
+ result.stderr.fnmatch_lines([
+- " WARNING: Can't compare. No benchmark files * '0002'.",
++ "* Can't compare. No benchmark files * '0002'.",
+ ])
+
+
+@@ -606,7 +606,7 @@
+ result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '-rw',
+ test, '--benchmark-compare')
+ result.stdout.fnmatch_lines([
+- "WBENCHMARK-C2 * Can't compare. No benchmark files in '*'."
++ "* Can't compare. No benchmark files in '*'."
+ " Can't load the previous benchmark."
+ ])
+
+@@ -616,7 +616,7 @@
+ result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules',
+ test, '--benchmark-compare', '--benchmark-verbose')
+ result.stderr.fnmatch_lines([
+- " WARNING: Can't compare. No benchmark files in '*'."
++ "* Can't compare. No benchmark files in '*'."
+ " Can't load the previous benchmark."
+ ])
+
+@@ -626,7 +626,7 @@
+ result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules', '-rw',
+ test, '--benchmark-compare=1')
+ result.stdout.fnmatch_lines([
+- "WBENCHMARK-C1 * Can't compare. No benchmark files in '*' match '1'."
++ "* Can't compare. No benchmark files in '*' match '1'."
+ ])
+
+
+@@ -635,7 +635,7 @@
+ result = testdir.runpytest('--benchmark-max-time=0.0000001', '--doctest-modules',
+ test, '--benchmark-compare=1', '--benchmark-verbose')
+ result.stderr.fnmatch_lines([
+- " WARNING: Can't compare. No benchmark files in '*' match '1'."
++ "* Can't compare. No benchmark files in '*' match '1'."
+ ])
+
+
+@@ -679,6 +679,7 @@
+ assert bench_info['extra_info'] == {'foo': 'bar'}
+
+
++@pytest.mark.skip("requires pygal")
+ def test_histogram(testdir):
+ test = testdir.makepyfile(SIMPLE_TEST)
+ result = testdir.runpytest('--doctest-modules', '--benchmark-histogram=foobar',
+@@ -715,7 +716,7 @@
+ result = testdir.runpytest('--benchmark-disable-gc', test)
+ result.stdout.fnmatch_lines([
+ "*collected 2 items",
+- "test_disable_gc.py ..",
++ "test_disable_gc.py ..*",
+ "* benchmark: 2 tests *",
+ "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
+ "------*",
+@@ -731,7 +732,7 @@
+ result = testdir.runpytest('--benchmark-timer=time.time', test)
+ result.stdout.fnmatch_lines([
+ "*collected 2 items",
+- "test_custom_timer.py ..",
++ "test_custom_timer.py ..*",
+ "* benchmark: 2 tests *",
+ "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
+ "------*",
+@@ -757,7 +758,7 @@
+ result = testdir.runpytest('--benchmark-sort=mean', test)
+ result.stdout.fnmatch_lines([
+ "*collected 2 items",
+- "test_sort_by_mean.py ..",
++ "test_sort_by_mean.py ..*",
+ "* benchmark: 2 tests *",
+ "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
+ "------*",
+@@ -858,11 +859,11 @@
+ result.stdout.fnmatch_lines([
+ "*collected 5 items",
+
+- "test_abort_broken.py::test_bad FAILED",
+- "test_abort_broken.py::test_bad2 FAILED",
+- "test_abort_broken.py::test_ok[a] ERROR",
+- "test_abort_broken.py::test_ok[b] ERROR",
+- "test_abort_broken.py::test_ok[c] ERROR",
++ "test_abort_broken.py::test_bad FAILED*",
++ "test_abort_broken.py::test_bad2 FAILED*",
++ "test_abort_broken.py::test_ok[a] ERROR*",
++ "test_abort_broken.py::test_ok[b] ERROR*",
++ "test_abort_broken.py::test_ok[c] ERROR*",
+
+ "*====== ERRORS ======*",
+ "*______ ERROR at setup of test_ok[[]a[]] ______*",
+@@ -977,11 +978,11 @@
+ result = testdir.runpytest('-vv', '--doctest-modules', test)
+ result.stdout.fnmatch_lines([
+ "*collected 5 items",
+- "test_basic.py::*test_basic PASSED",
+- "test_basic.py::test_slow PASSED",
+- "test_basic.py::test_slower PASSED",
+- "test_basic.py::test_xfast PASSED",
+- "test_basic.py::test_fast PASSED",
++ "test_basic.py::*test_basic PASSED*",
++ "test_basic.py::test_slow PASSED*",
++ "test_basic.py::test_slower PASSED*",
++ "test_basic.py::test_xfast PASSED*",
++ "test_basic.py::test_fast PASSED*",
+ "",
+ "* benchmark: 4 tests *",
+ "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
+@@ -1001,11 +1002,11 @@
+ result = testdir.runpytest('-vv', '--doctest-modules', '--benchmark-skip', test)
+ result.stdout.fnmatch_lines([
+ "*collected 5 items",
+- "test_skip.py::*test_skip PASSED",
+- "test_skip.py::test_slow SKIPPED",
+- "test_skip.py::test_slower SKIPPED",
+- "test_skip.py::test_xfast SKIPPED",
+- "test_skip.py::test_fast SKIPPED",
++ "test_skip.py::*test_skip PASSED*",
++ "test_skip.py::test_slow SKIPPED*",
++ "test_skip.py::test_slower SKIPPED*",
++ "test_skip.py::test_xfast SKIPPED*",
++ "test_skip.py::test_fast SKIPPED*",
+ "*====== 1 passed, 4 skipped* seconds ======*",
+ ])
+
+@@ -1015,11 +1016,11 @@
+ result = testdir.runpytest('-vv', '--doctest-modules', '--benchmark-disable', test)
+ result.stdout.fnmatch_lines([
+ "*collected 5 items",
+- "test_disable.py::*test_disable PASSED",
+- "test_disable.py::test_slow PASSED",
+- "test_disable.py::test_slower PASSED",
+- "test_disable.py::test_xfast PASSED",
+- "test_disable.py::test_fast PASSED",
++ "test_disable.py::*test_disable PASSED*",
++ "test_disable.py::test_slow PASSED*",
++ "test_disable.py::test_slower PASSED*",
++ "test_disable.py::test_xfast PASSED*",
++ "test_disable.py::test_fast PASSED*",
+ "*====== 5 passed * seconds ======*",
+ ])
+
+@@ -1029,7 +1030,7 @@
+ result = testdir.runpytest('-vv', '--doctest-modules', '-m', 'benchmark', test)
+ result.stdout.fnmatch_lines([
+ "*collected 5 items",
+- "test_mark_selection.py::test_xfast PASSED",
++ "test_mark_selection.py::test_xfast PASSED*",
+ "* benchmark: 1 tests *",
+ "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
+ "------*",
+@@ -1045,11 +1046,11 @@
+ result = testdir.runpytest('-vv', '--doctest-modules', '--benchmark-only', test)
+ result.stdout.fnmatch_lines([
+ "*collected 5 items",
+- "test_only_benchmarks.py::*test_only_benchmarks SKIPPED",
+- "test_only_benchmarks.py::test_slow PASSED",
+- "test_only_benchmarks.py::test_slower PASSED",
+- "test_only_benchmarks.py::test_xfast PASSED",
+- "test_only_benchmarks.py::test_fast PASSED",
++ "test_only_benchmarks.py::*test_only_benchmarks SKIPPED*",
++ "test_only_benchmarks.py::test_slow PASSED*",
++ "test_only_benchmarks.py::test_slower PASSED*",
++ "test_only_benchmarks.py::test_xfast PASSED*",
++ "test_only_benchmarks.py::test_fast PASSED*",
+ "* benchmark: 4 tests *",
+ "Name (time in ?s) * Min * Max * Mean * StdDev * Rounds * Iterations",
+ "------*",
+@@ -1067,7 +1068,7 @@
+ result = testdir.runpytest('--doctest-modules', '--benchmark-columns=max,iterations,min', test)
+ result.stdout.fnmatch_lines([
+ "*collected 3 items",
+- "test_columns.py ...",
++ "test_columns.py ...*",
+ "* benchmark: 2 tests *",
+ "Name (time in ?s) * Max * Iterations * Min *",
+ "------*",
================================================================
---- gitweb:
http://git.pld-linux.org/gitweb.cgi/packages/python-pytest-benchmark.git/commitdiff/911860a85cbac78fb638105c881fc59ac2ddc44b