diff --git a/.ruff.toml b/.ruff.toml index d4c0db0e7ea..06297b68892 100644 --- a/.ruff.toml +++ b/.ruff.toml @@ -29,7 +29,6 @@ ignore = [ # flake8-comprehensions (C4) "C400", # unnecessary-generator-list "C406", # unnecessary-literal-dict - "C408", # unnecessary-collection-call "C413", # unnecessary-call-around-sorted "C416", # unnecessary-comprehension "C414", # unnecessary-double-cast-or-process @@ -285,26 +284,26 @@ unfixable = [ # When a general exclusion is being fixed, but it affects many subpackages, it # is better to fix for subpackages individually. The general exclusion should be # copied to these subpackage sections and fixed there. -"astropy/__init__.py" = [] +"astropy/__init__.py" = ["C408"] "astropy/config/*" = [] "astropy/constants/*" = [] "astropy/convolution/*" = ["I001"] -"astropy/coordinates/*" = ["I001"] -"astropy/cosmology/*" = [] +"astropy/coordinates/*" = ["I001", "C408"] +"astropy/cosmology/*" = ["C408"] "astropy/io/*" = ["I001"] -"astropy/logger.py" = [] -"astropy/modeling/*" = ["I001"] -"astropy/nddata/*" = ["I001"] +"astropy/logger.py" = ["C408"] +"astropy/modeling/*" = ["I001", "C408"] +"astropy/nddata/*" = ["I001", "C408"] "astropy/samp/*" = [] "astropy/stats/*" = ["I001"] -"astropy/table/*" = ["I001"] -"astropy/tests/*" = [] -"astropy/time/*" = ["I001"] -"astropy/timeseries/*" = ["I001"] -"astropy/units/*" = ["I001"] -"astropy/uncertainty/*" = [] -"astropy/utils/*" = [] -"astropy/visualization/*" = ["I001"] -"astropy/wcs/*" = ["I001"] +"astropy/table/*" = ["I001", "C408"] +"astropy/tests/*" = ["C408"] +"astropy/time/*" = ["I001", "C408"] +"astropy/timeseries/*" = ["I001", "C408"] +"astropy/units/*" = ["I001", "C408"] +"astropy/uncertainty/*" = ["C408"] +"astropy/utils/*" = ["C408"] +"astropy/visualization/*" = ["I001", "C408"] +"astropy/wcs/*" = ["I001", "C408"] "docs/*" = [] "examples/coordinates/*" = [] diff --git a/astropy/io/ascii/tests/test_c_reader.py b/astropy/io/ascii/tests/test_c_reader.py index 2601bb5b0b5..58077a7b2dc 100644 --- a/astropy/io/ascii/tests/test_c_reader.py +++ b/astropy/io/ascii/tests/test_c_reader.py @@ -1221,7 +1221,7 @@ def test_read_big_table2(tmp_path): # fast_reader configurations: False| 'use_fast_converter'=False|True @pytest.mark.parametrize( "fast_reader", - [False, dict(use_fast_converter=False), dict(use_fast_converter=True)], + [False, {"use_fast_converter": False}, {"use_fast_converter": True}], ) @pytest.mark.parametrize("parallel", [False, True]) def test_data_out_of_range(parallel, fast_reader, guess): @@ -1335,7 +1335,7 @@ def test_data_out_of_range(parallel, fast_reader, guess): # fast_reader configurations: False| 'use_fast_converter'=False|True @pytest.mark.parametrize( "fast_reader", - [False, dict(use_fast_converter=False), dict(use_fast_converter=True)], + [False, {"use_fast_converter": False}, {"use_fast_converter": True}], ) @pytest.mark.parametrize("parallel", [False, True]) def test_data_at_range_limit(parallel, fast_reader, guess): @@ -1648,7 +1648,7 @@ def test_fortran_reader_notbasic(): """ )[1:-1] - t1 = ascii.read(tabstr.split("\n"), fast_reader=dict(exponent_style="D")) + t1 = ascii.read(tabstr.split("\n"), fast_reader={"exponent_style": "D"}) assert t1["b"].dtype.kind == "f" @@ -1663,7 +1663,7 @@ def test_fortran_reader_notbasic(): )[1:-1] t2 = ascii.read( - tabrdb.split("\n"), format="rdb", fast_reader=dict(exponent_style="fortran") + tabrdb.split("\n"), format="rdb", fast_reader={"exponent_style": "fortran"} ) assert t2["b"].dtype.kind == "f" @@ -1701,7 +1701,7 @@ def test_fortran_reader_notbasic(): 
tabrst.split("\n"), format="rst", guess=False, - fast_reader=dict(use_fast_converter=False), + fast_reader={"use_fast_converter": False}, ) tabrst = tabrst.replace("E", "D") @@ -1711,13 +1711,13 @@ def test_fortran_reader_notbasic(): tabrst.split("\n"), format="rst", guess=False, - fast_reader=dict(exponent_style="D"), + fast_reader={"exponent_style": "D"}, ) @pytest.mark.parametrize("guess", [True, False]) @pytest.mark.parametrize( - "fast_reader", [dict(exponent_style="D"), dict(exponent_style="A")] + "fast_reader", [{"exponent_style": "D"}, {"exponent_style": "A"}] ) def test_dict_kwarg_integrity(fast_reader, guess): """ @@ -1732,7 +1732,7 @@ def test_dict_kwarg_integrity(fast_reader, guess): @pytest.mark.parametrize( - "fast_reader", [False, dict(parallel=True), dict(parallel=False)] + "fast_reader", [False, {"parallel": True}, {"parallel": False}] ) def test_read_empty_basic_table_with_comments(fast_reader): """ @@ -1751,7 +1751,7 @@ def test_read_empty_basic_table_with_comments(fast_reader): @pytest.mark.parametrize( - "fast_reader", [dict(use_fast_converter=True), dict(exponent_style="A")] + "fast_reader", [{"use_fast_converter": True}, {"exponent_style": "A"}] ) def test_conversion_fast(fast_reader): """ diff --git a/astropy/io/ascii/tests/test_cds.py b/astropy/io/ascii/tests/test_cds.py index 0f5523cc9ea..c68e6419cc9 100644 --- a/astropy/io/ascii/tests/test_cds.py +++ b/astropy/io/ascii/tests/test_cds.py @@ -192,9 +192,9 @@ def test_write_byte_by_byte_for_masked_column(): assert lines == exp_output -exp_coord_cols_output = dict( +exp_coord_cols_output = { # fmt: off - generic=[ + "generic": [ '================================================================================', 'Byte-by-byte Description of file: table.dat', '--------------------------------------------------------------------------------', @@ -220,7 +220,7 @@ def test_write_byte_by_byte_for_masked_column(): 'HD81809 1e-07 22.25608 2e+00 67 5.0 20 22 02 15.4500000000 -61 39 34.599996000', 'HD103095 -3e+06 27.25000 -9e+34 -30 5.0 20 12 48 15.2244072000 +17 46 26.496624000', ], - positive_de=[ + "positive_de": [ '================================================================================', 'Byte-by-byte Description of file: table.dat', '--------------------------------------------------------------------------------', @@ -247,7 +247,7 @@ def test_write_byte_by_byte_for_masked_column(): 'HD103095 -3e+06 27.25000 -9e+34 -30 5.0 20 12 48 15.2244072000 +17 46 26.496624000', ], # fmt: on - galactic=[ + "galactic": [ "================================================================================", "Byte-by-byte Description of file: table.dat", "--------------------------------------------------------------------------------", @@ -268,7 +268,7 @@ def test_write_byte_by_byte_for_masked_column(): "HD81809 1e-07 22.25608 2e+00 67 5.0 20 330.071639591690 -45.548080484609", "HD103095 -3e+06 27.25000 -9e+34 -30 5.0 20 330.071639591690 -45.548080484609", ], - ecliptic=[ + "ecliptic": [ "================================================================================", "Byte-by-byte Description of file: table.dat", "--------------------------------------------------------------------------------", @@ -289,7 +289,7 @@ def test_write_byte_by_byte_for_masked_column(): "HD81809 1e-07 22.25608 2e+00 67 5.0 20 306.224208650096 -45.621789850825", "HD103095 -3e+06 27.25000 -9e+34 -30 5.0 20 306.224208650096 -45.621789850825", ], -) +} def test_write_coord_cols(): diff --git a/astropy/io/ascii/tests/test_read.py 
b/astropy/io/ascii/tests/test_read.py index 3b9fedacbea..eccdc568a02 100644 --- a/astropy/io/ascii/tests/test_read.py +++ b/astropy/io/ascii/tests/test_read.py @@ -1563,9 +1563,9 @@ def test_table_with_no_newline(): # Put a single line of column names but with no newline for kwargs in [ - dict(), - dict(guess=False, fast_reader=False, format="basic"), - dict(guess=False, fast_reader=True, format="fast_basic"), + {}, + {"guess": False, "fast_reader": False, "format": "basic"}, + {"guess": False, "fast_reader": True, "format": "fast_basic"}, ]: table = BytesIO() table.write(b"a b") @@ -1848,7 +1848,7 @@ def test_read_non_ascii(): def test_kwargs_dict_guess(enable): """Test that fast_reader dictionary is preserved through guessing sequence.""" # Fails for enable=(True, 'force') - #5578 - ascii.read("a\tb\n 1\t2\n3\t 4.0", fast_reader=dict(enable=enable)) + ascii.read("a\tb\n 1\t2\n3\t 4.0", fast_reader={"enable": enable}) assert get_read_trace()[-1]["kwargs"]["Reader"] is ( ascii.Tab if (enable is False) else ascii.FastTab ) diff --git a/astropy/io/ascii/tests/test_write.py b/astropy/io/ascii/tests/test_write.py index a1fe1c1db40..4030ad5d12a 100644 --- a/astropy/io/ascii/tests/test_write.py +++ b/astropy/io/ascii/tests/test_write.py @@ -21,85 +21,85 @@ from .common import setup_function, teardown_function # noqa: F401 test_defs = [ - dict( - kwargs=dict(), - out="""\ + { + "kwargs": {}, + "out": """\ ID XCENTER YCENTER MAG MERR MSKY NITER SHARPNESS CHI PIER PERROR 14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error 18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error """, - ), - dict( - kwargs=dict(delimiter=None), - out="""\ + }, + { + "kwargs": {"delimiter": None}, + "out": """\ ID XCENTER YCENTER MAG MERR MSKY NITER SHARPNESS CHI PIER PERROR 14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error 18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error """, - ), - dict( - kwargs=dict( - formats={"XCENTER": "%12.1f", "YCENTER": "{0:.1f}"}, - include_names=["XCENTER", "YCENTER"], - strip_whitespace=False, - ), - out="""\ + }, + { + "kwargs": { + "formats": {"XCENTER": "%12.1f", "YCENTER": "{0:.1f}"}, + "include_names": ["XCENTER", "YCENTER"], + "strip_whitespace": False, + }, + "out": """\ XCENTER YCENTER " 138.5" 256.4 " 18.1" 280.2 """, - ), - dict( - kwargs=dict(Writer=ascii.Rdb, exclude_names=["CHI"]), - out="""\ + }, + { + "kwargs": {"Writer": ascii.Rdb, "exclude_names": ["CHI"]}, + "out": """\ ID\tXCENTER\tYCENTER\tMAG\tMERR\tMSKY\tNITER\tSHARPNESS\tPIER\tPERROR N\tN\tN\tN\tN\tN\tN\tN\tN\tS 14\t138.538\t256.405\t15.461\t0.003\t34.85955\t4\t-0.032\t0\tNo_error 18\t18.114\t280.170\t22.329\t0.206\t30.12784\t4\t-2.544\t0\tNo_error """, - ), - dict( - kwargs=dict(Writer=ascii.Tab), - out="""\ + }, + { + "kwargs": {"Writer": ascii.Tab}, + "out": """\ ID\tXCENTER\tYCENTER\tMAG\tMERR\tMSKY\tNITER\tSHARPNESS\tCHI\tPIER\tPERROR 14\t138.538\t256.405\t15.461\t0.003\t34.85955\t4\t-0.032\t0.802\t0\tNo_error 18\t18.114\t280.170\t22.329\t0.206\t30.12784\t4\t-2.544\t1.104\t0\tNo_error """, - ), - dict( - kwargs=dict(Writer=ascii.Csv), - out="""\ + }, + { + "kwargs": {"Writer": ascii.Csv}, + "out": """\ ID,XCENTER,YCENTER,MAG,MERR,MSKY,NITER,SHARPNESS,CHI,PIER,PERROR 14,138.538,256.405,15.461,0.003,34.85955,4,-0.032,0.802,0,No_error 18,18.114,280.170,22.329,0.206,30.12784,4,-2.544,1.104,0,No_error """, - ), - dict( - kwargs=dict(Writer=ascii.NoHeader), - out="""\ + }, + { + "kwargs": {"Writer": ascii.NoHeader}, + "out": """\ 14 138.538 256.405 15.461 
0.003 34.85955 4 -0.032 0.802 0 No_error 18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error """, - ), - dict( - kwargs=dict(Writer=ascii.CommentedHeader), - out="""\ + }, + { + "kwargs": {"Writer": ascii.CommentedHeader}, + "out": """\ # ID XCENTER YCENTER MAG MERR MSKY NITER SHARPNESS CHI PIER PERROR 14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error 18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error """, - ), - dict( - kwargs=dict(Writer=ascii.CommentedHeader, comment="&"), - out="""\ + }, + { + "kwargs": {"Writer": ascii.CommentedHeader, "comment": "&"}, + "out": """\ &ID XCENTER YCENTER MAG MERR MSKY NITER SHARPNESS CHI PIER PERROR 14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error 18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error """, - ), - dict( - kwargs=dict(Writer=ascii.Latex), - out="""\ + }, + { + "kwargs": {"Writer": ascii.Latex}, + "out": """\ \\begin{table} \\begin{tabular}{ccccccccccc} ID & XCENTER & YCENTER & MAG & MERR & MSKY & NITER & SHARPNESS & CHI & PIER & PERROR \\\\ @@ -109,10 +109,10 @@ \\end{tabular} \\end{table} """, - ), - dict( - kwargs=dict(Writer=ascii.AASTex), - out="""\ + }, + { + "kwargs": {"Writer": ascii.AASTex}, + "out": """\ \\begin{deluxetable}{ccccccccccc} \\tablehead{\\colhead{ID} & \\colhead{XCENTER} & \\colhead{YCENTER} & \\colhead{MAG} & \\colhead{MERR} & \\colhead{MSKY} & \\colhead{NITER} & \\colhead{SHARPNESS} & \\colhead{CHI} & \\colhead{PIER} & \\colhead{PERROR}\\\\ \\colhead{ } & \\colhead{pixels} & \\colhead{pixels} & \\colhead{magnitudes} & \\colhead{magnitudes} & \\colhead{counts} & \\colhead{ } & \\colhead{ } & \\colhead{ } & \\colhead{ } & \\colhead{perrors}} \\startdata @@ -121,18 +121,18 @@ \\enddata \\end{deluxetable} """, - ), - dict( - kwargs=dict( - Writer=ascii.AASTex, - caption="Mag values \\label{tab1}", - latexdict={ + }, + { + "kwargs": { + "Writer": ascii.AASTex, + "caption": "Mag values \\label{tab1}", + "latexdict": { "units": {"MAG": "[mag]", "XCENTER": "[pixel]"}, "tabletype": "deluxetable*", "tablealign": "htpb", }, - ), - out="""\ + }, + "out": """\ \\begin{deluxetable*}{ccccccccccc}[htpb] \\tablecaption{Mag values \\label{tab1}} \\tablehead{\\colhead{ID} & \\colhead{XCENTER} & \\colhead{YCENTER} & \\colhead{MAG} & \\colhead{MERR} & \\colhead{MSKY} & \\colhead{NITER} & \\colhead{SHARPNESS} & \\colhead{CHI} & \\colhead{PIER} & \\colhead{PERROR}\\\\ \\colhead{ } & \\colhead{[pixel]} & \\colhead{pixels} & \\colhead{[mag]} & \\colhead{magnitudes} & \\colhead{counts} & \\colhead{ } & \\colhead{ } & \\colhead{ } & \\colhead{ } & \\colhead{perrors}} @@ -142,12 +142,12 @@ \\enddata \\end{deluxetable*} """, - ), - dict( - kwargs=dict( - Writer=ascii.Latex, - caption="Mag values \\label{tab1}", - latexdict={ + }, + { + "kwargs": { + "Writer": ascii.Latex, + "caption": "Mag values \\label{tab1}", + "latexdict": { "preamble": "\\begin{center}", "tablefoot": "\\end{center}", "data_end": ["\\hline", "\\hline"], @@ -155,9 +155,9 @@ "tabletype": "table*", "tablealign": "h", }, - col_align="|lcccccccccc|", - ), - out="""\ + "col_align": "|lcccccccccc|", + }, + "out": """\ \\begin{table*}[h] \\begin{center} \\caption{Mag values \\label{tab1}} @@ -172,10 +172,10 @@ \\end{center} \\end{table*} """, - ), - dict( - kwargs=dict(Writer=ascii.Latex, latexdict=ascii.latexdicts["template"]), - out="""\ + }, + { + "kwargs": {"Writer": ascii.Latex, "latexdict": ascii.latexdicts["template"]}, + "out": """\ \\begin{tabletype}[tablealign] preamble \\caption{caption} @@ 
-192,10 +192,10 @@ tablefoot \\end{tabletype} """, - ), - dict( - kwargs=dict(Writer=ascii.Latex, latexdict={"tabletype": None}), - out="""\ + }, + { + "kwargs": {"Writer": ascii.Latex, "latexdict": {"tabletype": None}}, + "out": """\ \\begin{tabular}{ccccccccccc} ID & XCENTER & YCENTER & MAG & MERR & MSKY & NITER & SHARPNESS & CHI & PIER & PERROR \\\\ & pixels & pixels & magnitudes & magnitudes & counts & & & & & perrors \\\\ @@ -203,12 +203,13 @@ 18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error \\\\ \\end{tabular} """, - ), - dict( - kwargs=dict( - Writer=ascii.HTML, htmldict={"css": "table,th,td{border:1px solid black;"} - ), - out="""\ + }, + { + "kwargs": { + "Writer": ascii.HTML, + "htmldict": {"css": "table,th,td{border:1px solid black;"}, + }, + "out": """\ @@ -263,10 +264,10 @@ """, - ), - dict( - kwargs=dict(Writer=ascii.Ipac), - out="""\ + }, + { + "kwargs": {"Writer": ascii.Ipac}, + "out": """\ \\MERGERAD='INDEF' \\IRAF='NOAO/IRAFV2.10EXPORT' \\USER='' @@ -306,13 +307,13 @@ 14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error 18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error """, - ), + }, ] test_defs_no_data = [ - dict( - kwargs=dict(Writer=ascii.Ipac), - out="""\ + { + "kwargs": {"Writer": ascii.Ipac}, + "out": """\ \\ This is an example of a valid comment. \\ The 2nd data line is used to verify the exact column parsing \\ (unclear if this is a valid for the IPAC format) @@ -324,109 +325,110 @@ | unit| unit|unit| unit| ergs| | null| null|null| null| null| """, - ), + }, ] tab_to_fill = ["a b c", "1 2 3", "1 1 3"] test_defs_fill_value = [ - dict( - kwargs=dict(), - out="""\ + { + "kwargs": {}, + "out": """\ a b c 1 2 3 1 1 3 """, - ), - dict( - kwargs=dict(fill_values=("1", "w")), - out="""\ + }, + { + "kwargs": {"fill_values": ("1", "w")}, + "out": """\ a b c w 2 3 w w 3 """, - ), - dict( - kwargs=dict(fill_values=("1", "w", "b")), - out="""\ + }, + { + "kwargs": {"fill_values": ("1", "w", "b")}, + "out": """\ a b c 1 2 3 1 w 3 """, - ), - dict( - kwargs=dict(fill_values=("1", "w"), fill_include_names=["b"]), - out="""\ + }, + { + "kwargs": {"fill_values": ("1", "w"), "fill_include_names": ["b"]}, + "out": """\ a b c 1 2 3 1 w 3 """, - ), - dict( - kwargs=dict(fill_values=("1", "w"), fill_exclude_names=["a"]), - out="""\ + }, + { + "kwargs": {"fill_values": ("1", "w"), "fill_exclude_names": ["a"]}, + "out": """\ a b c 1 2 3 1 w 3 """, - ), - dict( - kwargs=dict( - fill_values=("1", "w"), - fill_include_names=["a"], - fill_exclude_names=["a", "b"], - ), - out="""\ + }, + { + "kwargs": { + "fill_values": ("1", "w"), + "fill_include_names": ["a"], + "fill_exclude_names": ["a", "b"], + }, + "out": """\ a b c 1 2 3 1 1 3 """, - ), - dict( - kwargs=dict(fill_values=[("1", "w")], formats={"a": "%4.2f"}), - out="""\ + }, + { + "kwargs": {"fill_values": [("1", "w")], "formats": {"a": "%4.2f"}}, + "out": """\ a b c 1.00 2 3 1.00 w 3 """, - ), + }, ] test_def_masked_fill_value = [ - dict( - kwargs=dict(), - out="""\ + { + "kwargs": {}, + "out": """\ a b c "" 2 3 1 1 "" """, - ), - dict( - kwargs=dict(fill_values=[("1", "w"), (ascii.masked, "X")]), - out="""\ + }, + { + "kwargs": {"fill_values": [("1", "w"), (ascii.masked, "X")]}, + "out": """\ a b c X 2 3 w w X """, - ), - dict( - kwargs=dict( - fill_values=[("1", "w"), (ascii.masked, "XXX")], formats={"a": "%4.1f"} - ), - out="""\ + }, + { + "kwargs": { + "fill_values": [("1", "w"), (ascii.masked, "XXX")], + "formats": {"a": "%4.1f"}, + }, + "out": """\ a b c XXX 2 3 
1.0 w XXX """, - ), - dict( - kwargs=dict(Writer=ascii.Csv), - out="""\ + }, + { + "kwargs": {"Writer": ascii.Csv}, + "out": """\ a,b,c ,2,3 1,1, """, - ), + }, ] diff --git a/astropy/io/ascii/ui.py b/astropy/io/ascii/ui.py index 5eff43267bd..162281cd80b 100644 --- a/astropy/io/ascii/ui.py +++ b/astropy/io/ascii/ui.py @@ -682,11 +682,11 @@ def _get_guess_kwargs_list(read_kwargs): # If the table is probably HTML based on some heuristics then start with the # HTML reader. if read_kwargs.pop("guess_html", None): - guess_kwargs_list.append(dict(Reader=html.HTML)) + guess_kwargs_list.append({"Reader": html.HTML}) # Start with ECSV because an ECSV file will be read by Basic. This format # has very specific header requirements and fails out quickly. - guess_kwargs_list.append(dict(Reader=ecsv.Ecsv)) + guess_kwargs_list.append({"Reader": ecsv.Ecsv}) # Now try readers that accept the user-supplied keyword arguments # (actually include all here - check for compatibility of arguments later). @@ -709,7 +709,7 @@ def _get_guess_kwargs_list(read_kwargs): latex.Latex, latex.AASTex, ): - guess_kwargs_list.append(dict(Reader=reader)) + guess_kwargs_list.append({"Reader": reader}) # Cycle through the basic-style readers using all combinations of delimiter # and quotechar. @@ -724,7 +724,7 @@ def _get_guess_kwargs_list(read_kwargs): for delimiter in ("|", ",", " ", r"\s"): for quotechar in ('"', "'"): guess_kwargs_list.append( - dict(Reader=Reader, delimiter=delimiter, quotechar=quotechar) + {"Reader": Reader, "delimiter": delimiter, "quotechar": quotechar} ) return guess_kwargs_list diff --git a/astropy/io/fits/card.py b/astropy/io/fits/card.py index 89a5c0c0e86..3480bc02915 100644 --- a/astropy/io/fits/card.py +++ b/astropy/io/fits/card.py @@ -1123,14 +1123,14 @@ def _verify(self, option="warn"): and self._image.find("=") != 8 ): errs.append( - dict( - err_text=( + { + "err_text": ( "Card {!r} is not FITS standard (equal sign not " "at column 8).".format(self.keyword) ), - fix_text=fix_text, - fix=self._fix_value, - ) + "fix_text": fix_text, + "fix": self._fix_value, + } ) # verify the key, it is never fixable @@ -1146,11 +1146,11 @@ def _verify(self, option="warn"): if keyword != keyword.upper(): # Keyword should be uppercase unless it's a HIERARCH card errs.append( - dict( - err_text=f"Card keyword {keyword!r} is not upper case.", - fix_text=fix_text, - fix=self._fix_keyword, - ) + { + "err_text": f"Card keyword {keyword!r} is not upper case.", + "fix_text": fix_text, + "fix": self._fix_keyword, + } ) keyword = self.keyword @@ -1159,7 +1159,7 @@ def _verify(self, option="warn"): if not self._keywd_FSC_RE.match(keyword): errs.append( - dict(err_text=f"Illegal keyword name {keyword!r}", fixable=False) + {"err_text": f"Illegal keyword name {keyword!r}", "fixable": False} ) # verify the value, it may be fixable @@ -1169,13 +1169,13 @@ def _verify(self, option="warn"): # contains only printable ASCII characters if not self._ascii_text_re.match(valuecomment): errs.append( - dict( - err_text=( + { + "err_text": ( f"Unprintable string {valuecomment!r}; commentary " "cards may only contain printable ASCII characters" ), - fixable=False, - ) + "fixable": False, + } ) else: if not self._valuemodified: @@ -1186,14 +1186,14 @@ def _verify(self, option="warn"): # https://github.com/astropy/astropy/issues/5408 if m is None: errs.append( - dict( - err_text=( + { + "err_text": ( f"Card {self.keyword!r} is not FITS standard " f"(invalid value string: {valuecomment!r})." 
), - fix_text=fix_text, - fix=self._fix_value, - ) + "fix_text": fix_text, + "fix": self._fix_value, + } ) # verify the comment (string), it is never fixable @@ -1203,13 +1203,13 @@ def _verify(self, option="warn"): if comment is not None: if not self._ascii_text_re.match(comment): errs.append( - dict( - err_text=( + { + "err_text": ( f"Unprintable string {comment!r}; header comments " "may only contain printable ASCII characters" ), - fixable=False, - ) + "fixable": False, + } ) errs = _ErrList([self.run_option(option, **err) for err in errs]) diff --git a/astropy/io/fits/column.py b/astropy/io/fits/column.py index e4348a67604..3456bf7e2e3 100644 --- a/astropy/io/fits/column.py +++ b/astropy/io/fits/column.py @@ -1170,7 +1170,7 @@ def _verify_keywords( # TODO: This should be checked by the FITS verification code if dim is not None and dim != "": msg = None - dims_tuple = tuple() + dims_tuple = () # NOTE: If valid, the dim keyword's value in the the valid dict is # a tuple, not the original string; if invalid just the original # string is returned @@ -2376,7 +2376,7 @@ def _parse_tdim(tdim): return tuple(int(d.strip()) for d in dims.split(","))[::-1] # Ignore any dim values that don't specify a multidimensional column - return tuple() + return () def _scalar_to_format(value): diff --git a/astropy/io/fits/connect.py b/astropy/io/fits/connect.py index 210af2b96c2..01c84061ae7 100644 --- a/astropy/io/fits/connect.py +++ b/astropy/io/fits/connect.py @@ -183,7 +183,7 @@ def read_table_fits( """ if isinstance(input, HDUList): # Parse all table objects - tables = dict() + tables = {} for ihdu, hdu_item in enumerate(input): if isinstance(hdu_item, (TableHDU, BinTableHDU, GroupsHDU)): tables[ihdu] = hdu_item diff --git a/astropy/io/fits/file.py b/astropy/io/fits/file.py index fb861f85887..542bb92df50 100644 --- a/astropy/io/fits/file.py +++ b/astropy/io/fits/file.py @@ -507,7 +507,7 @@ def _try_read_compressed(self, obj_or_name, magic, mode, ext=""): "Use 'update' mode instead" ) # Handle gzip files - kwargs = dict(mode=IO_FITS_MODES[mode]) + kwargs = {"mode": IO_FITS_MODES[mode]} if isinstance(obj_or_name, str): kwargs["filename"] = obj_or_name else: diff --git a/astropy/io/fits/tests/test_connect.py b/astropy/io/fits/tests/test_connect.py index be2c7abc696..12a4e81b254 100644 --- a/astropy/io/fits/tests/test_connect.py +++ b/astropy/io/fits/tests/test_connect.py @@ -179,7 +179,7 @@ def test_read_with_unit_aliases(self, table_type): hdu = BinTableHDU(self.data) hdu.columns[0].unit = "Angstroms" hdu.columns[2].unit = "ergs/(cm.s.Angstroms)" - with u.set_enabled_aliases(dict(Angstroms=u.AA, ergs=u.erg)): + with u.set_enabled_aliases({"Angstroms": u.AA, "ergs": u.erg}): t = table_type.read(hdu) assert t["a"].unit == u.AA assert t["c"].unit == u.erg / (u.cm * u.s * u.AA) diff --git a/astropy/io/misc/yaml.py b/astropy/io/misc/yaml.py index 3f55c97dc91..9f7e90f9049 100644 --- a/astropy/io/misc/yaml.py +++ b/astropy/io/misc/yaml.py @@ -121,12 +121,12 @@ def _ndarray_representer(dumper, obj): data_b64 = base64.b64encode(obj.tobytes()) - out = dict( - buffer=data_b64, - dtype=str(obj.dtype) if not obj.dtype.fields else obj.dtype.descr, - shape=obj.shape, - order=order, - ) + out = { + "buffer": data_b64, + "dtype": str(obj.dtype) if not obj.dtype.fields else obj.dtype.descr, + "shape": obj.shape, + "order": order, + } return dumper.represent_mapping("!numpy.ndarray", out) @@ -143,10 +143,10 @@ def _ndarray_constructor(loader, node): def _void_representer(dumper, obj): data_b64 = 
base64.b64encode(obj.tobytes()) - out = dict( - buffer=data_b64, - dtype=str(obj.dtype) if not obj.dtype.fields else obj.dtype.descr, - ) + out = { + "buffer": data_b64, + "dtype": str(obj.dtype) if not obj.dtype.fields else obj.dtype.descr, + } return dumper.represent_mapping("!numpy.void", out) diff --git a/astropy/io/registry/base.py b/astropy/io/registry/base.py index 1747c11896d..ab78149d89c 100644 --- a/astropy/io/registry/base.py +++ b/astropy/io/registry/base.py @@ -42,8 +42,11 @@ def __init__(self): self._identifiers = OrderedDict() # what this class can do: e.g. 'read' &/or 'write' - self._registries = dict() - self._registries["identify"] = dict(attr="_identifiers", column="Auto-identify") + self._registries = {} + self._registries["identify"] = { + "attr": "_identifiers", + "column": "Auto-identify", + } self._registries_order = ("identify",) # match keys in `_registries` # If multiple formats are added to one class the update of the docs is quite diff --git a/astropy/io/registry/core.py b/astropy/io/registry/core.py index a89b72b4819..30d3d255d40 100644 --- a/astropy/io/registry/core.py +++ b/astropy/io/registry/core.py @@ -67,7 +67,7 @@ def my_table_reader(filename, some_option=1): def __init__(self): super().__init__() # set _identifiers self._readers = OrderedDict() - self._registries["read"] = dict(attr="_readers", column="Read") + self._registries["read"] = {"attr": "_readers", "column": "Read"} self._registries_order = ("read", "identify") # ========================================================================= @@ -246,7 +246,7 @@ class UnifiedOutputRegistry(_UnifiedIORegistryBase): def __init__(self): super().__init__() self._writers = OrderedDict() - self._registries["write"] = dict(attr="_writers", column="Write") + self._registries["write"] = {"attr": "_writers", "column": "Write"} self._registries_order = ("write", "identify") # ========================================================================= diff --git a/astropy/io/registry/tests/test_registries.py b/astropy/io/registry/tests/test_registries.py index 188d7a7b005..74d83a57290 100644 --- a/astropy/io/registry/tests/test_registries.py +++ b/astropy/io/registry/tests/test_registries.py @@ -261,7 +261,7 @@ def test_compat_unregister_identifier(self, registry, fmtcls1): def test_compat_identify_format(self, registry, fmtcls1): fmt, cls = fmtcls1 - args = (None, cls, None, None, (None,), dict()) + args = (None, cls, None, None, (None,), {}) # with registry specified registry.register_identifier(*fmtcls1, empty_identifier) @@ -360,7 +360,7 @@ def test_delay_doc_updates(self, registry, fmtcls1): def test_identify_read_format(self, registry): """Test ``registry.identify_format()``.""" - args = ("read", EmptyData, None, None, (None,), dict()) + args = ("read", EmptyData, None, None, (None,), {}) # test there is no format to identify formats = registry.identify_format(*args) diff --git a/astropy/io/votable/connect.py b/astropy/io/votable/connect.py index 86f48f6ccfb..4f5e43d0c72 100644 --- a/astropy/io/votable/connect.py +++ b/astropy/io/votable/connect.py @@ -88,7 +88,7 @@ def read_table_votable( input = parse(input, table_id=table_id, verify=verify, **kwargs) # Parse all table objects - table_id_mapping = dict() + table_id_mapping = {} tables = [] if isinstance(input, VOTableFile): for table in input.iter_tables(): diff --git a/astropy/io/votable/exceptions.py b/astropy/io/votable/exceptions.py index aa18e3e2c4a..af821c70a28 100644 --- a/astropy/io/votable/exceptions.py +++ b/astropy/io/votable/exceptions.py @@ 
-84,7 +84,7 @@ def _format_message(message, name, config=None, pos=None): def _suppressed_warning(warning, config, stacklevel=2): warning_class = type(warning) - config.setdefault("_warning_counts", dict()).setdefault(warning_class, 0) + config.setdefault("_warning_counts", {}).setdefault(warning_class, 0) config["_warning_counts"][warning_class] += 1 message_count = config["_warning_counts"][warning_class] if message_count <= conf.max_warnings:
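For reference, the pattern enforced throughout this diff is ruff's C408 (unnecessary-collection-call): a `dict()` call with keyword arguments is rewritten as a dict literal. The snippet below is an illustrative sketch only, not part of the patch; the `fast_reader` name simply mirrors the test parameters converted above.

    # C408: dict() called with keyword arguments -- flagged by ruff
    fast_reader = dict(exponent_style="D", use_fast_converter=True)

    # Preferred literal form -- what this diff converts to
    fast_reader = {"exponent_style": "D", "use_fast_converter": True}

    # Both forms build the same mapping
    assert dict(exponent_style="D", use_fast_converter=True) == {
        "exponent_style": "D",
        "use_fast_converter": True,
    }

    # C408 targets calls that can be written as literals (dict()/list()/tuple(),
    # including the keyword-argument form above); dict(iterable_of_pairs) and
    # dict(**mapping) are not affected.

The accompanying `.ruff.toml` change follows the convention stated in that file's own comment: the global C408 ignore is dropped, and C408 is added to the per-file ignore lists of the subpackages not yet converted, so each subpackage can be cleaned up individually.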