Support parallelism in noisy simulator #97

Merged 12 commits on Mar 18, 2024
65 changes: 55 additions & 10 deletions qermit/noise_model/transpiler_backend.py
@@ -4,9 +4,10 @@
from pytket.utils.outcomearray import OutcomeArray
import uuid
from pytket.passes import BasePass, CustomPass
from typing import Dict, List, Optional, Iterator, Sequence, Iterable
from typing import Dict, List, Optional, Iterator, Sequence, Iterable, Tuple
from pytket import Circuit, Bit
from pytket.backends.resulthandle import ResultHandle
import multiprocessing


class TranspilerBackend:
@@ -22,35 +23,45 @@ class TranspilerBackend:
shot batch size permitted.
result_dict: A dictionary mapping handles to results.
backend: Backend used to simulate compiled circuits.
n_cores: The number of cores used when simulating shots in parallel.
"""

transpiler: BasePass
max_batch_size: int
result_dict: Dict[ResultHandle, BackendResult]
n_cores: Optional[int]
backend = AerBackend()

def __init__(
self,
transpiler: BasePass,
max_batch_size: int = 100,
result_dict: Dict[ResultHandle, BackendResult] = {},
max_batch_size: int = 1000,
n_cores: Optional[int] = None,
):
"""Initialisation method.

:param transpiler: Compiler to use during noise simulation.
:type transpiler: BasePass
:param max_batch_size: Size of the largest batch of shots,
defaults to 100
defaults to 1000. The total number of shots is distributed between
batches of size max_batch_size, plus a smaller batch for any
leftover shots. These batches are distributed across multiple cores.
:type max_batch_size: int, optional
:param result_dict: Results dictionary, which may be used to store
existing results within the backend, defaults to {}
:type result_dict: Dict[ResultHandle, BackendResult], optional
:param n_cores: The number of cores to use when taking shots in
parallel, defaults to None, in which case all available cores
are used.
:type n_cores: Optional[int]
"""

self.transpiler = transpiler

self.max_batch_size = max_batch_size
self.result_dict = result_dict
self.n_cores = n_cores
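
A hypothetical worked example of the batch split described in the docstring above; batch_sizes is an illustrative helper, not part of this file, but its arithmetic matches what max_batch_size and the remainder handling in _gen_batches imply.

```python
from typing import List

def batch_sizes(n_shots: int, max_batch_size: int = 1000) -> List[int]:
    # Illustrative helper (not part of the PR): full batches of
    # max_batch_size shots, plus one smaller batch for the remainder.
    full_batches, remainder = divmod(n_shots, max_batch_size)
    return [max_batch_size] * full_batches + ([remainder] if remainder else [])

# 2500 shots -> two full batches and one leftover batch, each of which
# can be handed to a separate worker process.
assert batch_sizes(2500) == [1000, 1000, 500]
```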

def default_compilation_pass(self, **kwargs) -> BasePass:
"""Return a compiler pass which has no affect on the circuit.
@@ -201,12 +212,41 @@ def _gen_batches(self, circuit: Circuit, n_shots: int) -> Iterator[List[Circuit]]
for _ in range(n_shots % self.max_batch_size)
]

@staticmethod
def _get_batch_counts(
circuit_list: List[Circuit],
cbits_list: Optional[List[List]],
) -> Counter[Tuple[int, ...]]:
"""Run each circuit in the given list for one shot,
collating the results into a single counter.

:param circuit_list: The list of circuits to run for one shot each.
:type circuit_list: List[Circuit]
:param cbits_list: The classical bits to return the measurements of
:type cbits_list: Optional[List[List]]
:return: The collated counter object.
:rtype: Counter[Tuple[int, ...]]
"""

if cbits_list is not None:
cbits = [Bit.from_list(cbit_list) for cbit_list in cbits_list]
else:
cbits = None

backend = AerBackend()

result_list = backend.run_circuits(circuit_list, n_shots=1)
return sum(
(result.get_counts(cbits=cbits) for result in result_list),
Counter()
)

def get_counts(
self,
circuit: Circuit,
n_shots: int,
cbits: Optional[List[Bit]] = None,
) -> Counter:
) -> Counter[Tuple[int, ...]]:
"""Generate shots from the given circuit.

:param circuit: Circuit to take shots from.
Expand All @@ -221,11 +261,16 @@ def get_counts(
:rtype: Counter[Tuple[int, ...]]
"""

counter: Counter = Counter()
if cbits is not None:
cbits_list = [cbit.to_list() for cbit in cbits]
else:
cbits_list = None

for circuit_list in self._gen_batches(circuit, n_shots):
result_list = self.backend.run_circuits(circuit_list, n_shots=1)
counter += sum((result.get_counts(cbits=cbits)
for result in result_list), Counter())
with multiprocessing.Pool(self.n_cores) as pool:
processes = [
pool.apply_async(self._get_batch_counts, args=(circuit_list, cbits_list))
for circuit_list in self._gen_batches(circuit, n_shots)
]
counter_list = [p.get() for p in processes]

return counter
return sum(counter_list, Counter())
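
A minimal usage sketch of the parallel path added above, assuming qermit is installed with this change; the identity pass, Bell circuit, and shot count are illustrative choices, not taken from the PR.

```python
# Hypothetical usage sketch of TranspilerBackend with the new n_cores option.
from pytket import Circuit
from pytket.passes import CustomPass
from qermit.noise_model import TranspilerBackend

if __name__ == "__main__":
    # An identity pass stands in for a real error-injecting transpiler.
    identity_pass = CustomPass(lambda circ: circ)

    backend = TranspilerBackend(
        transpiler=identity_pass,
        max_batch_size=1000,  # shots are split into batches of at most this size
        n_cores=2,            # each batch is simulated in a separate worker process
    )

    bell = Circuit(2).H(0).CX(0, 1).measure_all()
    counts = backend.get_counts(bell, n_shots=2500)  # 1000 + 1000 + 500 shot batches
    print(counts)
```

Keeping the calls under the __main__ guard matters when the multiprocessing start method is "spawn", which the updated tests below force explicitly.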
5 changes: 5 additions & 0 deletions tests/noise_model_test.py
@@ -16,6 +16,7 @@
from copy import deepcopy
import pytest
from qermit.noise_model.noise_model import Direction
import multiprocessing as mp


def test_to_ptm() -> None:
@@ -302,6 +303,8 @@ def test_to_dict(tmpdir_factory) -> None:
@pytest.mark.high_compute
def test_transpiler_backend() -> None:

mp.set_start_method("spawn", force=True)

circuit = Circuit(3)
for _ in range(32):
circuit.ZZMax(0, 1).ZZMax(1, 2)
@@ -361,6 +364,8 @@ def test_pauli_error_transpile() -> None:

def test_noise_model() -> None:

mp.set_start_method("spawn", force=True)

error_distribution_dict = {}
error_rate = 0.5
error_distribution_dict[(Pauli.X, Pauli.I)] = error_rate
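The test modules in this PR force the "spawn" start method before exercising TranspilerBackend. Below is a standalone sketch of the pool pattern used in get_counts, showing why the pool work sits behind a __main__ guard under spawn; one_batch is a stand-in for _get_batch_counts and is not qermit code.

```python
import multiprocessing as mp
from collections import Counter

def one_batch(batch_size: int) -> Counter:
    # Stand-in for TranspilerBackend._get_batch_counts: pretend every
    # shot in the batch returned the all-zero outcome.
    return Counter({(0, 0): batch_size})

if __name__ == "__main__":
    # Under "spawn", workers re-import this module, so the pool must be
    # created inside the __main__ guard to avoid recursive process creation.
    mp.set_start_method("spawn", force=True)
    with mp.Pool(2) as pool:
        results = [pool.apply_async(one_batch, args=(n,)) for n in (1000, 1000, 500)]
        total = sum((r.get() for r in results), Counter())
    print(total)  # Counter({(0, 0): 2500})
```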
5 changes: 5 additions & 0 deletions tests/zne_test.py
@@ -50,6 +50,7 @@
from qermit.zero_noise_extrapolation.zne import gen_noise_scaled_mitex
from qermit.noise_model import TranspilerBackend, PauliErrorTranspile
from itertools import product
import multiprocessing as mp

n_qubits = 2

@@ -848,6 +849,8 @@ def test_end_to_end_noise_scaled_mitex():
@pytest.mark.high_compute
def test_end_to_end_noise_aware_zne_mitex_starting_from_ptm() -> None:

mp.set_start_method("spawn", force=True)

# Here we are creating the PTM for a noise model acting
# XI with rate 0.1
ptm = np.diag([1, 1, 1, 1, 1, 1, 1, 1, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8])
@@ -907,6 +910,8 @@ def test_end_to_end_noise_aware_zne_mitex_starting_from_ptm() -> None:
@pytest.mark.high_compute
def test_end_to_end_noise_aware_zne_mitex():

mp.set_start_method("spawn", force=True)

error_rate = 0.1
error_distribution = ErrorDistribution(
distribution={(Pauli.X, Pauli.I): error_rate}