Description: migrate from nose to pytest
 Forwarding is not needed because upstream appears to be phasing out
 nosetests usage in parallel, so this patch may not be necessary with
 the next upstream version.
Author: Étienne Mollier <emollier@debian.org>
Bug-Debian: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1018559
Forwarded: not-needed
Last-Update: 2022-12-03
---
This patch header follows DEP-3: http://dep.debian.net/deps/dep3/
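
The conversion follows a mechanical pattern: nose's assert_* helpers become
plain assert statements, and @nottest becomes @pytest.mark.skip.  A minimal
sketch of that pattern, using a hypothetical test rather than one taken from
the upstream suite:

    import pytest

    @pytest.mark.skip(reason="legacy BLAST deprecated")  # was: @nottest
    def test_legacy_example():
        """Placeholder for a test of deprecated legacy BLAST behaviour."""

    def test_example():
        assert 1 + 1 == 2   # was: assert_equal(1 + 1, 2)
        assert 0.1 < 0.5    # was: assert_less(0.1, 0.5)
        assert not []       # was: assert_false([])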
--- a/tests/test_anib.py
+++ b/tests/test_anib.py
@@ -6,10 +6,10 @@
 
 These tests are intended to be run from the repository root using:
 
-nosetests -v
+pytest -v
 
-print() statements will be caught by nosetests unless there is an
-error. They can also be recovered with the -s option.
+print() statements will be caught by pytest unless there is an
+error.
 
 (c) The James Hutton Institute 2017
 Author: Leighton Pritchard
@@ -55,7 +55,7 @@
 
 import pandas as pd
 
-from nose.tools import assert_equal, nottest
+import pytest
 from pandas.testing import assert_frame_equal
 
 from pyani import anib, pyani_files
@@ -274,66 +274,66 @@
         os.makedirs(self.fmtdboutdir, exist_ok=True)
         os.makedirs(self.makeblastdbdir, exist_ok=True)
 
-    @nottest  #  legacy BLAST deprecated
+    @pytest.mark.skip(reason="legacy BLAST deprecated")
     def test_formatdb_generation(self):
         """generate formatdb command-line."""
         cmd = anib.construct_formatdb_cmd(os.path.join(self.seqdir, "NC_002696.fna"), self.fmtdboutdir)
-        assert_equal(cmd[0], self.fmtdbcmd)  # correct command
+        assert cmd[0] == self.fmtdbcmd  # correct command
         assert os.path.isfile(cmd[1])  # creates new file
 
     def test_makeblastdb_generation(self):
         """generate makeblastdb command-line."""
         cmd = anib.construct_makeblastdb_cmd(os.path.join(self.seqdir, "NC_002696.fna"), self.makeblastdbdir)
-        assert_equal(cmd[0], self.makeblastdbcmd)  # correct command
+        assert cmd[0] == self.makeblastdbcmd  # correct command
 
     def test_blastdb_commands(self):
         """generate BLAST+ db commands."""
         # BLAST+
         cmds = anib.generate_blastdb_commands(self.blastdbfnames, self.outdir, mode="ANIb")
-        assert_equal(cmds, self.blastdbtgt)
+        assert cmds == self.blastdbtgt
 
-    @nottest  #  legacy BLAST deprecated
+    @pytest.mark.skip(reason="legacy BLAST deprecated")
     def test_legacy_blastdb_commands(self):
         """generate legacy BLAST db commands."""
         # legacy
         cmds = anib.generate_blastdb_commands(self.blastdbfnames, self.outdir, mode="ANIblastall")
-        assert_equal(cmds, self.blastdbtgtlegacy)
+        assert cmds == self.blastdbtgtlegacy
 
     def test_blastn_generation(self):
         """generate BLASTN+ command-line."""
         cmd = anib.construct_blastn_cmdline(self.blastdbfnames[0], self.blastdbfnames[1], self.outdir)
-        assert_equal(cmd, self.blastncmd)
+        assert cmd == self.blastncmd
 
-    @nottest  #  legacy BLAST deprecated
+    @pytest.mark.skip(reason="legacy BLAST deprecated")
     def test_blastall_generation(self):
         """generate legacy BLASTN command-line."""
         cmd = anib.construct_blastall_cmdline(self.blastdbfnames[0], self.blastdbfnames[1], self.outdir)
-        assert_equal(cmd, self.blastallcmd)
+        assert cmd == self.blastallcmd
 
     def test_blastn_commands(self):
         """generate BLASTN+ commands."""
         # BLAST+
         cmds = anib.generate_blastn_commands(self.blastdbfnames, self.outdir, mode="ANIb")
-        assert_equal(cmds, self.blastntgt)
+        assert cmds == self.blastntgt
 
-    @nottest  #  legacy BLAST deprecated
+    @pytest.mark.skip(reason="legacy BLAST deprecated")
     def test_legacy_blastn_commands(self):
         """generate legacy BLASTN commands."""
         cmds = anib.generate_blastn_commands(self.blastdbfnames, self.outdir, mode="ANIblastall")
-        assert_equal(cmds, self.blastalltgt)
+        assert cmds == self.blastalltgt
 
-    @nottest  #  legacy BLAST deprecated
+    @pytest.mark.skip(reason="legacy BLAST deprecated")
     def test_blastall_dbjobdict(self):
         """generate dictionary of legacy BLASTN database jobs."""
         blastcmds = anib.make_blastcmd_builder("ANIblastall", self.outdir)
         jobdict = anib.build_db_jobs(self.infiles, blastcmds)
-        assert_equal(sorted([(k, v.script) for (k, v) in jobdict.items()]), self.blastalljobdict)
+        assert sorted([(k, v.script) for (k, v) in jobdict.items()]) == self.blastalljobdict
 
     def test_blastn_dbjobdict(self):
         """generate dictionary of BLASTN+ database jobs."""
         blastcmds = anib.make_blastcmd_builder("ANIb", self.outdir)
         jobdict = anib.build_db_jobs(self.infiles, blastcmds)
-        assert_equal(sorted([(k, v.script) for (k, v) in jobdict.items()]), self.blastnjobdict)
+        assert sorted([(k, v.script) for (k, v) in jobdict.items()]) == self.blastnjobdict
 
     def test_blastn_graph(self):
         """create jobgraph for BLASTN+ jobs."""
@@ -344,11 +344,11 @@
         # is a single dependency, which is a makeblastdb job
         for job in jobgraph:
             assert job.script.startswith("blastn")
-            assert_equal(1, len(job.dependencies))
+            assert 1 == len(job.dependencies)
             dep = job.dependencies[0]
             assert dep.script.startswith("makeblastdb")
 
-    @nottest  #  legacy BLAST deprecated
+    @pytest.mark.skip(reason="legacy BLAST deprecated")
     def test_blastall_graph(self):
         """create jobgraph for legacy BLASTN jobs."""
         fragresult = anib.fragment_fasta_files(self.infiles, self.outdir, self.fraglen)
@@ -358,7 +358,7 @@
         # is a single dependency, which is a makeblastdb job
         for job in jobgraph:
             assert job.script.startswith("blastall -p blastn")
-            assert_equal(1, len(job.dependencies))
+            assert 1 == len(job.dependencies)
             dep = job.dependencies[0]
             assert dep.script.startswith("formatdb")
 
@@ -456,16 +456,16 @@
             index=["NC_002696", "NC_010338", "NC_011916", "NC_014100"],
         )
 
-    @nottest  # legacy BLASTN deprecated
+    @pytest.mark.skip(reason="legacy BLAST deprecated")
     def test_parse_blasttab(self):
         """parses ANIblastall .blast_tab output."""
         fragdata = anib.get_fraglength_dict([self.fragfname])
         # ANIb output
         result = anib.parse_blast_tab(self.fname, fragdata, 0.3, 0.7, mode="ANIb")
-        assert_equal(result, (4016551, 93, 99.997693577050029))
+        assert result == (4016551, 93, 99.997693577050029)
         # ANIblastall output
         result = anib.parse_blast_tab(self.fname_legacy, fragdata, 0.3, 0.7, mode="ANIblastall")
-        assert_equal(result, (1966922, 406104, 78.578978313253018))
+        assert result == (1966922, 406104, 78.578978313253018)
 
     def test_blastdir_processing(self):
         """parses directory of .blast_tab output."""
@@ -478,7 +478,7 @@
             self.anibtgt.sort_index(axis=1).sort_index(),
         )
 
-    @nottest  #  legacy BLAST deprecated
+    @pytest.mark.skip(reason="legacy BLAST deprecated")
     def test_legacy_blastdir_processing(self):
         """parse directory of legacy .blast_tab output"""
         orglengths = pyani_files.get_sequence_lengths(self.infnames)
--- a/tests/test_anim.py
+++ b/tests/test_anim.py
@@ -7,10 +7,10 @@
 
 These tests are intended to be run from the repository root using:
 
-nosetests -v
+pytest -v
 
-print() statements will be caught by nosetests unless there is an
-error. They can also be recovered with the -s option.
+print() statements will be caught by pytest unless there is an
+error.
 
 (c) The James Hutton Institute 2017
 Author: Leighton Pritchard
@@ -56,7 +56,7 @@
 
 import pandas as pd
 
-from nose.tools import assert_equal
+import pytest
 from pandas.testing import assert_frame_equal
 
 from pyani import anim, pyani_files
@@ -152,12 +152,12 @@
         produced correctly
         """
         cmds = anim.construct_nucmer_cmdline("file1.fna", "file2.fna", outdir=self.outdir)
-        assert_equal(cmds, (self.ntgt, self.ftgt))
+        assert cmds == (self.ntgt, self.ftgt)
 
     def test_maxmatch_cmd_generation(self):
         """generate NUCmer command line with maxmatch."""
         ncmd, fcmd = anim.construct_nucmer_cmdline("file1.fna", "file2.fna", outdir=self.outdir, maxmatch=True)
-        assert_equal(ncmd, self.ntgtmax)
+        assert ncmd == self.ntgtmax
 
     def test_multi_cmd_generation(self):
         """generate multiple abstract NUCmer/delta-filter command-lines.
@@ -165,7 +165,7 @@
         Tests that all the input files are correctly-paired
         """
         cmds = anim.generate_nucmer_commands(self.files)
-        assert_equal(cmds, (self.ncmdlist, self.fcmdlist))
+        assert cmds == (self.ncmdlist, self.fcmdlist)
 
     def test_nucmer_job_generation(self):
         """generate dependency tree of NUCmer/delta-filter jobs.
@@ -173,11 +173,11 @@
         Tests that the correct dependency graph and naming scheme is produced.
         """
         joblist = anim.generate_nucmer_jobs(self.files, jobprefix="test")
-        assert_equal(len(joblist), 6)
+        assert len(joblist) == 6
         for idx, job in enumerate(joblist):
-            assert_equal(job.name, "test_%06d-f" % idx)  # filter job name
-            assert_equal(len(job.dependencies), 1)  # has NUCmer job
-            assert_equal(job.dependencies[0].name, "test_%06d-n" % idx)  # NUCmer job name
+            assert job.name == "test_%06d-f" % idx  # filter job name
+            assert len(job.dependencies) == 1  # has NUCmer job
+            assert job.dependencies[0].name == "test_%06d-n" % idx  # NUCmer job name
 
 
 class TestDeltafileProcessing(unittest.TestCase):
@@ -203,7 +203,7 @@
     def test_deltafile_import(self):
         """parses NUCmer .delta/.filter file."""
         result = anim.parse_delta(self.deltafile)
-        assert_equal(result, (4073917, 2191))
+        assert result == (4073917, 2191)
 
     def test_process_deltadir(self):
         """processes directory of .delta files into ANIResults."""
--- a/tests/test_concordance.py
+++ b/tests/test_concordance.py
@@ -7,10 +7,10 @@
 
 These tests are intended to be run from the repository root using:
 
-nosetests -v
+pytest -v
 
-print() statements will be caught by nosetests unless there is an
-error. They can also be recovered with the -s option.
+print() statements will be caught by pytest unless there is an
+error.
 
 (c) The James Hutton Institute 2017-2019
 Author: Leighton Pritchard
@@ -59,7 +59,7 @@
 
 import pandas as pd
 
-from nose.tools import assert_equal, assert_less, nottest
+import pytest
 
 from pyani import run_multiprocessing as run_mp
 from pyani import anib, anim, tetra, pyani_files, pyani_config
@@ -162,7 +162,7 @@
             diffmat, index=result_pid.index, columns=result_pid.columns
         )
         anim_diff.to_csv(os.path.join(self.outdir, "pyani_anim_diff.tab"), sep="\t")
-        assert_less(anim_diff.abs().values.max(), self.tolerance["ANIm"])
+        assert anim_diff.abs().values.max() < self.tolerance["ANIm"]
 
     def test_anib_concordance(self):
         """ANIb results concordant with JSpecies.
@@ -179,7 +179,7 @@
         jobgraph = anib.make_job_graph(
             self.infiles, fragfiles, anib.make_blastcmd_builder("ANIb", outdir)
         )
-        assert_equal(0, run_mp.run_dependency_graph(jobgraph))
+        assert 0 == run_mp.run_dependency_graph(jobgraph)
         results = anib.process_blast(outdir, self.orglengths, fraglengths, mode="ANIb")
         result_pid = results.percentage_identity
         result_pid.to_csv(os.path.join(self.outdir, "pyani_anib.tab"), sep="\t")
@@ -206,10 +206,10 @@
             diffmat, index=result_pid.index, columns=result_pid.columns
         )
         anib_diff.to_csv(os.path.join(self.outdir, "pyani_anib_diff.tab"), sep="\t")
-        assert_less(lo_diff.abs().values.max(), self.tolerance["ANIb_lo"])
-        assert_less(hi_diff.abs().values.max(), self.tolerance["ANIb_hi"])
+        assert lo_diff.abs().values.max() < self.tolerance["ANIb_lo"]
+        assert hi_diff.abs().values.max() < self.tolerance["ANIb_hi"]
 
-    @nottest  # legacy BLAST is deprecated
+    @pytest.mark.skip(reason="legacy BLAST is deprecated")
     def test_aniblastall_concordance(self):
         """ANIblastall results concordant with JSpecies."""
         # Perform ANIblastall on the input directory contents
@@ -221,7 +221,7 @@
         jobgraph = anib.make_job_graph(
             self.infiles, fragfiles, anib.make_blastcmd_builder("ANIblastall", outdir)
         )
-        assert_equal(0, run_mp.run_dependency_graph(jobgraph))
+        assert 0 == run_mp.run_dependency_graph(jobgraph)
         results = anib.process_blast(
             outdir, self.orglengths, fraglengths, mode="ANIblastall"
         )
@@ -237,7 +237,7 @@
         aniblastall_diff.to_csv(
             os.path.join(self.outdir, "pyani_aniblastall_diff.tab"), sep="\t"
         )
-        assert_less(aniblastall_diff.abs().values.max(), self.tolerance["ANIblastall"])
+        assert aniblastall_diff.abs().values.max() < self.tolerance["ANIblastall"]
 
     def test_tetra_concordance(self):
         """TETRA results concordant with JSpecies."""
@@ -253,4 +253,4 @@
         diffmat = results.values - self.target["Tetra"].values
         tetra_diff = pd.DataFrame(diffmat, index=results.index, columns=results.columns)
         tetra_diff.to_csv(os.path.join(self.outdir, "pyani_tetra_diff.tab"), sep="\t")
-        assert_less(tetra_diff.abs().values.max(), self.tolerance["TETRA"])
+        assert tetra_diff.abs().values.max() < self.tolerance["TETRA"]
--- a/tests/test_dependencies.py
+++ b/tests/test_dependencies.py
@@ -3,15 +3,12 @@
 """Tests for availability of pyani dependencies
 
 We only test for dependencies from non-standard libraries.
-
-These tests are intended to be run using the nose package
-(see https://nose.readthedocs.org/en/latest/).
 """
 
 import subprocess
 import sys
 
-from nose.tools import assert_equal, nottest
+import pytest
 
 
 def test_import_biopython():
@@ -50,10 +47,10 @@
         check=True,
     )
     print(result.stdout)
-    assert_equal(result.stdout[:6], b"blastn")
+    assert result.stdout[:6] == b"blastn"
 
 
-@nottest
+@pytest.mark.skip(reason="legacy BLAST deprecated")
 def test_run_blastall():
     """Test that legacy BLAST is runnable."""
     cmd = "blastall"
@@ -65,7 +62,7 @@
         stderr=subprocess.PIPE,
     )
     print(result.stdout)
-    assert_equal(result.stdout[1:9], b"blastall")
+    assert result.stdout[1:9] == b"blastall"
 
 
 def test_run_nucmer():
@@ -79,4 +76,4 @@
         check=True,
     )
     print(result.stderr)  # NUCmer puts output to STDERR!
-    assert_equal(result.stderr[:6], b"nucmer")
+    assert result.stderr[:6] == b"nucmer"
--- a/tests/test_graphics.py
+++ b/tests/test_graphics.py
@@ -2,10 +2,6 @@
 
 """Tests for pyani graphics
 
-These tests are intended to be run using the nose package
-(see https://nose.readthedocs.org/en/latest/), from the repository root
-directory.
-
 If the test is run directly at the command-line, the output obtained by each
 test is returned to STDOUT.
 """
--- a/tests/test_jobs.py
+++ b/tests/test_jobs.py
@@ -7,10 +7,10 @@
 
 These tests are intended to be run from the repository root using:
 
-nosetests -v
+pytest -v
 
-print() statements will be caught by nosetests unless there is an
-error. They can also be recovered with the -s option.
+print() statements will be caught by pytest unless there is an
+error.
 
 (c) The James Hutton Institute 2017
 Author: Leighton Pritchard
@@ -53,7 +53,7 @@
 
 import unittest
 
-from nose.tools import (assert_equal, )
+import pytest
 
 from pyani import (pyani_jobs, )
 
@@ -69,33 +69,33 @@
     def test_create_job(self):
         """create a dummy job."""
         job = pyani_jobs.Job('empty', '')
-        assert_equal(job.script, "")
+        assert job.script == ""
 
     def test_create_job_with_command(self):
         """create dummy job with command."""
         job = pyani_jobs.Job('dummy', self.cmds[0])
-        assert_equal(job.script, self.cmds[0])
+        assert job.script == self.cmds[0]
 
     def test_add_dependency(self):
         """create dummy job with dependency."""
         job1 = pyani_jobs.Job('dummy_with_dependency', self.cmds[0])
         job2 = pyani_jobs.Job('dummy_dependency', self.cmds[1])
         job1.add_dependency(job2)
-        assert_equal(self.cmds[0], job1.script)
-        assert_equal(1, len(job1.dependencies))
+        assert self.cmds[0] == job1.script
+        assert 1 == len(job1.dependencies)
         dep = job1.dependencies[0]
-        assert_equal(self.cmds[1], dep.script)
+        assert self.cmds[1] == dep.script
 
     def test_remove_dependency(self):
         """create dummy job, add and remove dependency."""
         job1 = pyani_jobs.Job('dummy_with_dependency', self.cmds[0])
         job2 = pyani_jobs.Job('dummy_dependency', self.cmds[1])
         job1.add_dependency(job2)
-        assert_equal(1, len(job1.dependencies))
+        assert 1 == len(job1.dependencies)
         dep = job1.dependencies[0]
-        assert_equal(self.cmds[1], dep.script)
+        assert self.cmds[1] == dep.script
         job1.remove_dependency(dep)
-        assert_equal(0, len(job1.dependencies))
+        assert 0 == len(job1.dependencies)
 
 
 class TestJobGroup(unittest.TestCase):
@@ -128,38 +128,38 @@
     def test_create_jobgroup(self):
         """create dummy jobgroup."""
         jobgroup = pyani_jobs.JobGroup('empty', '')
-        assert_equal(jobgroup.script, self.emptyscript)
+        assert jobgroup.script == self.emptyscript
 
     def test_1d_jobgroup(self):
         """create dummy 1-parameter sweep jobgroup."""
         jobgroup = pyani_jobs.JobGroup('1d-sweep', 'cat', arguments=self.params1)
-        assert_equal(jobgroup.script, self.p1script)
-        assert_equal(3, jobgroup.tasks)
+        assert jobgroup.script == self.p1script
+        assert 3 == jobgroup.tasks
 
     def test_2d_jobgroup(self):
         """create dummy 2-parameter sweep jobgroup."""
         jobgroup = pyani_jobs.JobGroup('2d-sweep', 'myprog', arguments=self.params2)
-        assert_equal(jobgroup.script, self.p2script)
-        assert_equal(4, jobgroup.tasks)
+        assert jobgroup.script == self.p2script
+        assert 4 == jobgroup.tasks
 
     def test_add_dependency(self):
         """add jobgroup dependency."""
         jg1 = pyani_jobs.JobGroup('1d-sweep', 'cat', arguments=self.params1)
         jg2 = pyani_jobs.JobGroup('2d-sweep', 'myprog', arguments=self.params2)
         jg2.add_dependency(jg1)
-        assert_equal(4, jg2.tasks)
-        assert_equal(1, len(jg2.dependencies))
+        assert 4 == jg2.tasks
+        assert 1 == len(jg2.dependencies)
         dep = jg2.dependencies[0]
-        assert_equal(3, dep.tasks)
-        assert_equal('1d-sweep', dep.name)
+        assert 3 == dep.tasks
+        assert '1d-sweep' == dep.name
 
     def test_remove_dependency(self):
         """add and remove jobgroup dependency."""
         jg1 = pyani_jobs.JobGroup('1d-sweep', 'cat', arguments=self.params1)
         jg2 = pyani_jobs.JobGroup('2d-sweep', 'myprog', arguments=self.params2)
         jg2.add_dependency(jg1)
-        assert_equal(1, len(jg2.dependencies))
+        assert 1 == len(jg2.dependencies)
         dep = jg2.dependencies[0]
-        assert_equal('1d-sweep', dep.name)
+        assert '1d-sweep' == dep.name
         jg2.remove_dependency(dep)
-        assert_equal(0, len(jg2.dependencies))
+        assert 0 == len(jg2.dependencies)
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -1,6 +1,5 @@
 black
 flake8
-nose
 pytest
 pytest-cov
 setuptools
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,3 +1,3 @@
-nose
 coverage
-codecov
\ No newline at end of file
+codecov
+pytest
--- a/tests/README.md
+++ b/tests/README.md
@@ -6,40 +6,62 @@
 
 ### Dependencies
 
-The tests in this directory rely on the [`nose`](https://nose.readthedocs.org/en/latest/) package, which can be installed using
+The tests in this directory rely on the [`pytest`](https://docs.pytest.org/en/latest/) package, which can be installed using
 
 ```
-pip install nose
+pip install pytest
 ```
 
 ### Running tests
 
-To run the tests in this directory with `nose` run the following command:
+To run the tests in this directory with `pytest` run the following command:
 
 ```
-nosetests
+pytest
 ```
 
 This will run silently for quite a while (the comparisons are not quick), but should generate output that looks like this:
 
 ```
-$ nosetests
-........
-Thread 2: value 0
-Thread 2: value 1
-Thread 3: value 0
-Thread 3: value 1
-Thread 3: value 2
-Thread 1: value 0
-Thread 4: value 0
-Thread 4: value 1
-Thread 4: value 2
-Thread 4: value 3
-..
-----------------------------------------------------------------------
-Ran 14 tests in 804.833s
+$ pytest
+============================= test session starts ==============================
+platform linux -- Python 3.11.0+, pytest-7.1.2, pluggy-1.0.0+repack
+rootdir: /<<PKGBUILDDIR>>
+collected 56 items
+
+tests/test_anib.py sss.....sss...ss                                      [ 28%]
+tests/test_anim.py ......                                                [ 39%]
+tests/test_concordance.py .s..                                           [ 46%]
+tests/test_dependencies.py ......s.                                      [ 60%]
+tests/test_graphics.py ......                                            [ 71%]
+tests/test_jobs.py .........                                             [ 87%]
+tests/test_multiprocessing.py ...                                        [ 92%]
+tests/test_parsing.py .                                                  [ 94%]
+tests/test_tetra.py ...                                                  [100%]
+
+=============================== warnings summary ===============================
+../../../../../../usr/lib/python3/dist-packages/pyparsing/core.py:26
+  /usr/lib/python3/dist-packages/pyparsing/core.py:26: DeprecationWarning: module 'sre_constants' is deprecated
+    import sre_constants
+
+.pybuild/cpython3_3.11_pyani/build/tests/test_anib.py::TestParsing::test_blastdir_processing
+  /<<PKGBUILDDIR>>/.pybuild/cpython3_3.11_pyani/build/tests/test_anib.py:513: FutureWarning: In a future version of pandas all arguments of DataFrame.sort_index will be keyword-only
+    result.percentage_identity.sort_index(1).sort_index(),
+
+.pybuild/cpython3_3.11_pyani/build/tests/test_anib.py::TestParsing::test_blastdir_processing
+  /<<PKGBUILDDIR>>/.pybuild/cpython3_3.11_pyani/build/tests/test_anib.py:514: FutureWarning: In a future version of pandas all arguments of DataFrame.sort_index will be keyword-only
+    self.anibtgt.sort_index(1).sort_index(),
+
+.pybuild/cpython3_3.11_pyani/build/tests/test_anim.py::TestDeltafileProcessing::test_process_deltadir
+  /<<PKGBUILDDIR>>/.pybuild/cpython3_3.11_pyani/build/tests/test_anim.py:221: FutureWarning: In a future version of pandas all arguments of DataFrame.sort_index will be keyword-only
+    result.percentage_identity.sort_index(1).sort_index(),
+
+.pybuild/cpython3_3.11_pyani/build/tests/test_anim.py::TestDeltafileProcessing::test_process_deltadir
+  /<<PKGBUILDDIR>>/.pybuild/cpython3_3.11_pyani/build/tests/test_anim.py:222: FutureWarning: In a future version of pandas all arguments of DataFrame.sort_index will be keyword-only
+    self.df_pid.sort_index(1).sort_index(),
 
-OK
+-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html
+============ 46 passed, 10 skipped, 5 warnings in 104.81s (0:01:44) ============
 ```
 
 
@@ -77,4 +99,4 @@
 
 The `test_ani_data` directory contains input files for testing `pyani`, and examples for comparative testing of graphics output.
 
-The `test_failing_data` directory contains input data that throws expected errors in ANI analysis, as described in `test_failing_data/README.md`.
\ No newline at end of file
+The `test_failing_data` directory contains input data that throws expected errors in ANI analysis, as described in `test_failing_data/README.md`.
--- a/tests/test_multiprocessing.py
+++ b/tests/test_multiprocessing.py
@@ -7,9 +7,9 @@
 
 These tests are intended to be run from the repository root using:
 
-nosetests -v
+pytest -v
 
-print() statements will be caught by nosetests unless there is an
+print() statements will be caught by pytest unless there is an
 error. They can also be recovered with the -s option.
 
 (c) The James Hutton Institute 2017
@@ -54,7 +54,7 @@
 import os
 import unittest
 
-from nose.tools import assert_equal, nottest
+import pytest
 
 from pyani import run_multiprocessing, pyani_jobs, anib
 
@@ -82,7 +82,7 @@
     def test_multiprocessing_run(self):
         """multiprocessing() runs basic jobs."""
         result = run_multiprocessing.multiprocessing_run(self.cmdlist)
-        assert_equal(0, result)
+        assert 0 == result
 
     def test_cmdsets(self):
         """module builds command sets."""
@@ -91,7 +91,7 @@
         job1.add_dependency(job2)
         cmdsets = run_multiprocessing.populate_cmdsets(job1, list(), depth=1)
         target = [{cmd} for cmd in self.cmds]
-        assert_equal(cmdsets, target)
+        assert cmdsets == target
 
     def test_dependency_graph_run(self):
         """module runs dependency graph."""
@@ -99,4 +99,4 @@
         blastcmds = anib.make_blastcmd_builder("ANIb", self.outdir)
         jobgraph = anib.make_job_graph(self.infiles, fragresult[0], blastcmds)
         result = run_multiprocessing.run_dependency_graph(jobgraph)
-        assert_equal(0, result)
+        assert 0 == result
--- a/tests/test_parsing.py
+++ b/tests/test_parsing.py
@@ -1,14 +1,10 @@
 #!/usr/bin/env python
 
-"""Tests for pyani package intermediate file parsing
-
-These tests are intended to be run using the nose package
-(see https://nose.readthedocs.org/en/latest/).
-"""
+"""Tests for pyani package intermediate file parsing"""
 
 import os
 
-from nose.tools import assert_equal
+import pytest
 from pyani import anim
 
 # Work out where we are. We need to do this to find related data files
@@ -24,6 +20,6 @@
 def test_anim_delta():
     """Test parsing of NUCmer delta file."""
     aln, sim = anim.parse_delta(DELTAFILE)
-    assert_equal(aln, 4073917)
-    assert_equal(sim, 2191)
+    assert aln == 4073917
+    assert sim == 2191
     print("Alignment length: {0}\nSimilarity Errors: {1}".format(aln, sim))
--- a/tests/test_tetra.py
+++ b/tests/test_tetra.py
@@ -7,10 +7,10 @@
 
 These tests are intended to be run from the repository root using:
 
-nosetests -v
+pytest -v
 
-print() statements will be caught by nosetests unless there is an
-error. They can also be recovered with the -s option.
+print() statements will be caught by pytest unless there is an
+error.
 
 (c) The James Hutton Institute 2017
 Author: Leighton Pritchard
@@ -57,7 +57,7 @@
 
 import pandas as pd
 
-from nose.tools import assert_equal, assert_false, assert_true
+import pytest
 from pandas.testing import assert_frame_equal
 
 from pyani import tetra
@@ -88,15 +88,15 @@
 
     def test_tetraclean(self):
         """detects unambiguous IUPAC symbols correctly."""
-        assert_false(tetra.tetra_clean("ACGTYACGTACNGTACGWTACGT"))
-        assert_true(tetra.tetra_clean("ACGTACGTACGTACGTACGTAC"))
+        assert not tetra.tetra_clean("ACGTYACGTACNGTACGWTACGT")
+        assert tetra.tetra_clean("ACGTACGTACGTACGTACGTAC")
 
     def test_zscore(self):
         """TETRA Z-score calculated correctly."""
         tetra_z = tetra.calculate_tetra_zscore(self.infile)
         with open(os.path.join(self.tgtdir, "zscore.json"), "r") as ifh:
             target = json.load(ifh)
-        assert_equal(ordered(tetra_z), ordered(target))
+        assert ordered(tetra_z) == ordered(target)
 
     def test_correlations(self):
         """TETRA correlation calculated correctly."""
