diff --git a/qermit/noise_model/transpiler_backend.py b/qermit/noise_model/transpiler_backend.py index f9a6eae5..9cd3c8e9 100644 --- a/qermit/noise_model/transpiler_backend.py +++ b/qermit/noise_model/transpiler_backend.py @@ -4,9 +4,10 @@ from pytket.utils.outcomearray import OutcomeArray import uuid from pytket.passes import BasePass, CustomPass -from typing import Dict, List, Optional, Iterator, Sequence, Iterable +from typing import Dict, List, Optional, Iterator, Sequence, Iterable, Tuple from pytket import Circuit, Bit from pytket.backends.resulthandle import ResultHandle +import multiprocessing class TranspilerBackend: @@ -22,35 +23,45 @@ class TranspilerBackend: shot batch size permitted. result_dict: A dictionary mapping handles to results. backend: Backend used to simulate compiled circuits. + n_cores: The number of cores used when simulating shots in parallel. """ transpiler: BasePass max_batch_size: int result_dict: Dict[ResultHandle, BackendResult] + n_cores: int backend = AerBackend() def __init__( self, transpiler: BasePass, - max_batch_size: int = 100, result_dict: Dict[ResultHandle, BackendResult] = {}, + max_batch_size: int = 1000, + n_cores: int = 1, ): """Initialisation method. :param transpiler: Compiler to use during noise simulation. :type transpiler: BasePass :param max_batch_size: Size of the largest batch of shots, - defaults to 100 + defaults to 1000. The total number of shots is distributed between + batches of size max_batch_size, plus a smaller batch for any + left-over shots. These batches will be distributed to multiple cores. :type max_batch_size: int, optional :param result_dict: Results dictionary, may be used to store existing results within backend, defaults to {} :type result_dict: Dict[ResultHandle, BackendResult], optional + :param n_cores: Shots will be taken in parallel. This parameter + specifies the number of cores to use. The default is to use + one core. + :type n_cores: int, optional. Defaults to 1. 
""" self.transpiler = transpiler self.max_batch_size = max_batch_size self.result_dict = result_dict + self.n_cores = n_cores def default_compilation_pass(self, **kwargs) -> BasePass: """Return a compiler pass which has no affect on the circuit. @@ -201,12 +212,41 @@ def _gen_batches(self, circuit: Circuit, n_shots: int) -> Iterator[List[Circuit] for _ in range(n_shots % self.max_batch_size) ] + @staticmethod + def _get_batch_counts( + circuit_list: List[Circuit], + cbits_list: Optional[List[List]], + ) -> Counter[Tuple[int, ...]]: + """Run each circuit in the given list for one shot, + collating the results into a single counter. + + :param circuit_list: The list of circuits to run for one shot each. + :type circuit_list: List[Circuit] + :param cbits_list: The classical bits to return the measurements of + :type cbits_list: Optional[List[List]] + :return: The collated counter object. + :rtype: Counter[Tuple[int, ...]] + """ + + if cbits_list is not None: + cbits = [Bit.from_list(cbit_list) for cbit_list in cbits_list] + else: + cbits = None + + backend = AerBackend() + + result_list = backend.run_circuits(circuit_list, n_shots=1) + return sum( + (result.get_counts(cbits=cbits) for result in result_list), + Counter() + ) + def get_counts( self, circuit: Circuit, n_shots: int, cbits: Optional[List[Bit]] = None, - ) -> Counter: + ) -> Counter[Tuple[int, ...]]: """Generate shots from the given circuit. :param circuit: Circuit to take shots from. 
@@ -221,11 +261,30 @@ def get_counts( :rtype: Iterator[Counter] """ - counter: Counter = Counter() + if self.n_cores > 1: + + if cbits is not None: + cbits_list = [cbit.to_list() for cbit in cbits] + else: + cbits_list = None + + with multiprocessing.Pool(self.n_cores) as pool: + processes = [ + pool.apply_async(self._get_batch_counts, args=(circuit_list, cbits_list)) + for circuit_list in self._gen_batches(circuit, n_shots) + ] + counter_list = [p.get() for p in processes] + + return sum(counter_list, Counter()) + + else: - for circuit_list in self._gen_batches(circuit, n_shots): - result_list = self.backend.run_circuits(circuit_list, n_shots=1) - counter += sum((result.get_counts(cbits=cbits) - for result in result_list), Counter()) + counter: Counter = Counter() + for circuit_list in self._gen_batches(circuit, n_shots): + result_list = self.backend.run_circuits(circuit_list, n_shots=1) + counter += sum( + (result.get_counts(cbits=cbits) for result in result_list), + Counter() + ) - return counter + return counter diff --git a/tests/noise_model_test.py b/tests/noise_model_test.py index 065b4832..f6c73658 100644 --- a/tests/noise_model_test.py +++ b/tests/noise_model_test.py @@ -16,6 +16,7 @@ from copy import deepcopy import pytest from qermit.noise_model.noise_model import Direction +import multiprocessing as mp def test_to_ptm() -> None: @@ -302,6 +303,8 @@ def test_to_dict(tmpdir_factory) -> None: @pytest.mark.high_compute def test_transpiler_backend() -> None: + mp.set_start_method("spawn", force=True) + circuit = Circuit(3) for _ in range(32): circuit.ZZMax(0, 1).ZZMax(1, 2) @@ -361,6 +364,8 @@ def test_pauli_error_transpile() -> None: def test_noise_model() -> None: + mp.set_start_method("spawn", force=True) + error_distribution_dict = {} error_rate = 0.5 error_distribution_dict[(Pauli.X, Pauli.I)] = error_rate diff --git a/tests/zne_test.py b/tests/zne_test.py index 539a6de2..41837611 100644 --- a/tests/zne_test.py +++ b/tests/zne_test.py @@ -50,6 +50,7 @@ 
from qermit.zero_noise_extrapolation.zne import gen_noise_scaled_mitex from qermit.noise_model import TranspilerBackend, PauliErrorTranspile from itertools import product +import multiprocessing as mp n_qubits = 2 @@ -848,6 +849,8 @@ def test_end_to_end_noise_scaled_mitex(): @pytest.mark.high_compute def test_end_to_end_noise_aware_zne_mitex_starting_from_ptm() -> None: + mp.set_start_method("spawn", force=True) + # Here we are creating the PTM for a noise model acting # XI with rate 0.1 ptm = np.diag([1, 1, 1, 1, 1, 1, 1, 1, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8]) @@ -860,7 +863,10 @@ def test_end_to_end_noise_aware_zne_mitex_starting_from_ptm() -> None: } ) transpiler = PauliErrorTranspile(noise_model=noise_model) - backend = TranspilerBackend(transpiler=transpiler) + backend = TranspilerBackend( + transpiler=transpiler, + n_cores=1, + ) # Here we perform ZNE with some unevenly spaced # noise scaling values. @@ -907,6 +913,8 @@ def test_end_to_end_noise_aware_zne_mitex_starting_from_ptm() -> None: @pytest.mark.high_compute def test_end_to_end_noise_aware_zne_mitex(): + mp.set_start_method("spawn", force=True) + error_rate = 0.1 error_distribution = ErrorDistribution( distribution={(Pauli.X, Pauli.I): error_rate} @@ -915,7 +923,10 @@ def test_end_to_end_noise_aware_zne_mitex(): noise_model={OpType.CZ: error_distribution} ) transpiler = PauliErrorTranspile(noise_model=noise_model) - backend = TranspilerBackend(transpiler=transpiler) + backend = TranspilerBackend( + transpiler=transpiler, + n_cores=1, + ) zne_mitex = gen_ZNE_MitEx( backend=backend,