Skip to content

Commit

Permalink
Remove f-strings for compatibility with Python versions < 3.6
Browse files Browse the repository at this point in the history
  • Loading branch information
oscarhiggott committed Apr 23, 2021
1 parent 31a1adc commit dcaeea3
Show file tree
Hide file tree
Showing 5 changed files with 22 additions and 21 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ jobs:
strategy:
fail-fast: false
matrix:
python-version: [3.6, 3.7, 3.8, 3.9]
python-version: [3.5, 3.6, 3.7, 3.8, 3.9]

steps:
- uses: actions/checkout@v2
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/wheels.yml
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ jobs:
run: |
python -m cibuildwheel --output-dir wheelhouse
env:
CIBW_BUILD: "cp36-* cp37-* cp38-* cp39-*"
CIBW_BUILD: "cp*-*"
CIBW_SKIP: "*-win32"
- uses: actions/upload-artifact@v2
with:
Expand Down
18 changes: 9 additions & 9 deletions docs/toric-code-example.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -187,8 +187,8 @@
"Simulating L=4...\n",
"Simulating L=8...\n",
"Simulating L=12...\n",
"CPU times: user 1min 58s, sys: 592 ms, total: 1min 59s\n",
"Wall time: 2min 1s\n"
"CPU times: user 1min 56s, sys: 589 ms, total: 1min 57s\n",
"Wall time: 2min 3s\n"
]
}
],
Expand All @@ -201,7 +201,7 @@
"np.random.seed(2)\n",
"log_errors_all_L = []\n",
"for L in Ls:\n",
" print(f\"Simulating L={L}...\")\n",
" print(\"Simulating L={}...\".format(L))\n",
" Hx = toric_code_x_stabilisers(L)\n",
" logX = toric_code_x_logicals(L)\n",
" log_errors = []\n",
Expand Down Expand Up @@ -242,7 +242,7 @@
"plt.figure()\n",
"for L, logical_errors in zip(Ls, log_errors_all_L):\n",
" std_err = (logical_errors*(1-logical_errors)/num_trials)**0.5\n",
" plt.errorbar(ps, logical_errors, yerr=std_err, label=f\"L={L}\")\n",
" plt.errorbar(ps, logical_errors, yerr=std_err, label=\"L={}\".format(L))\n",
"plt.xlabel(\"Physical error rate\")\n",
"plt.ylabel(\"Logical error rate\")\n",
"plt.legend(loc=0);"
Expand Down Expand Up @@ -311,8 +311,8 @@
"Simulating L=8...\n",
"Simulating L=10...\n",
"Simulating L=12...\n",
"CPU times: user 13min 22s, sys: 3.07 s, total: 13min 25s\n",
"Wall time: 13min 31s\n"
"CPU times: user 13min 40s, sys: 4.17 s, total: 13min 44s\n",
"Wall time: 14min 15s\n"
]
}
],
Expand All @@ -324,7 +324,7 @@
"ps = np.linspace(0.02, 0.04, 7)\n",
"log_errors_all_L = []\n",
"for L in Ls:\n",
" print(f\"Simulating L={L}...\")\n",
" print(\"Simulating L={}...\".format(L))\n",
" Hx = toric_code_x_stabilisers(L)\n",
" logX = toric_code_x_logicals(L)\n",
" log_errors = []\n",
Expand All @@ -338,7 +338,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"Plotting the results, we find a threshold of around 3%, consistent with the threshold of 2.93% found in [this paper](https://arxiv.org/abs/quant-ph/0207088):"
"Plotting the results, we find a threshold of around 3%, consistent with the threshold of 2.9% found in [this paper](https://arxiv.org/abs/quant-ph/0207088):"
]
},
{
Expand All @@ -365,7 +365,7 @@
"plt.figure()\n",
"for L, logical_errors in zip(Ls, log_errors_all_L):\n",
" std_err = (logical_errors*(1-logical_errors)/num_trials)**0.5\n",
" plt.errorbar(ps, logical_errors, yerr=std_err, label=f\"L={L}\")\n",
" plt.errorbar(ps, logical_errors, yerr=std_err, label=\"L={}\".format(L))\n",
"plt.yscale(\"log\")\n",
"plt.xlabel(\"Physical error rate\")\n",
"plt.ylabel(\"Logical error rate\")\n",
Expand Down
2 changes: 1 addition & 1 deletion docs/usage.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -169,7 +169,7 @@
],
"source": [
"c = m.decode(z)\n",
"print(f\"c: {c}, of type {type(c)}\")"
"print(\"c: {}, of type {}\".format(c, type(c)))"
]
},
{
Expand Down
19 changes: 10 additions & 9 deletions src/pymatching/matching.py
Original file line number Diff line number Diff line change
Expand Up @@ -278,20 +278,20 @@ def decode(self, z, num_neighbours=20, return_weight=False):
z = np.array(z, dtype=np.uint8)
except:
raise ValueError("Syndrome must be of type numpy.ndarray or "\
f"convertible to numpy.ndarray, not {z}")
"convertible to numpy.ndarray, not {}".format(z))
if len(z.shape) == 1 and (self.num_stabilisers <= z.shape[0]
<= self.num_stabilisers+len(self.boundary)):
defects = z.nonzero()[0]
elif len(z.shape) == 2 and z.shape[0] == self.num_stabilisers:
num_stabs = self.stabiliser_graph.get_num_nodes()
max_num_defects = z.shape[0]*z.shape[1]
if max_num_defects > num_stabs:
raise ValueError(f"Syndrome size {z.shape[0]}x{z.shape[1]} exceeds" \
f" the number of stabilisers ({num_stabs})")
raise ValueError("Syndrome size {}x{} exceeds" \
" the number of stabilisers ({})".format(z.shape[0],z.shape[1],num_stabs))
times, checks = z.T.nonzero()
defects = times*self.num_stabilisers + checks
else:
raise ValueError(f"The shape ({z.shape}) of the syndrome vector z is not valid.")
raise ValueError("The shape ({}) of the syndrome vector z is not valid.".format(z.shape))
if len(defects) % 2 != 0:
if len(self.boundary) == 0:
raise ValueError("Syndrome must contain an even number of defects "
Expand Down Expand Up @@ -409,8 +409,9 @@ def __repr__(self):
M = self.num_stabilisers
B = len(self.boundary)
E = self.stabiliser_graph.get_num_edges()
return (f"<pymatching.Matching object with "
f"{N} qubit{'s' if N != 1 else ''},"
f" {M} stabiliser{'s' if M != 1 else ''}, "
f"{B} boundary node{'s' if B != 1 else ''}, and "
f"{E} edge{'s' if E != 1 else ''}>")
return "<pymatching.Matching object with "\
"{} qubit{}, {} stabiliser{}, "\
"{} boundary node{}, "\
"and {} edge{}>".format(N, 's' if N != 1 else '',
M, 's' if M != 1 else '', B, 's' if B != 1 else '',
E, 's' if E != 1 else '')

0 comments on commit dcaeea3

Please sign in to comment.