Skip to content
This repository was archived by the owner on Aug 14, 2024. It is now read-only.

Commit

Permalink
Run tox -eblack
Browse files Browse the repository at this point in the history
  • Loading branch information
hunterkemeny committed Feb 12, 2024
1 parent 0a0ef9c commit 534203b
Showing 1 changed file with 11 additions and 9 deletions.
20 changes: 11 additions & 9 deletions red_queen/runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# The functions get_qubit_depths, get_maximum_qubit_depths, and get_circuit_depths are from
# The functions get_qubit_depths, get_maximum_qubit_depths, and get_circuit_depths are from
# QASMBench's QMetric.py module and adapted to work with red-queen.
# The original source can be found at:
#
Expand Down Expand Up @@ -94,7 +94,7 @@ def __init__(
second_compiler_readout: str,
):
"""
:param compiler_dict: dictionary of compiler info --> {"compiler": "COMPILER_NAME",
:param compiler_dict: dictionary of compiler info --> {"compiler": "COMPILER_NAME",
"version": "VERSION NUM", "optimization_level": OPTIMIZATION_LEVEL}
:param backend: name of backend to be used --> "BACKEND_NAME"
:param num_runs: number of times to run each benchmark
Expand All @@ -121,7 +121,9 @@ def get_qasm_benchmark(self, qasm_name):
benchmarking_path = os.path.join(
os.path.dirname(__file__), "benchmarking", "benchmarks"
)
with open(os.path.join(benchmarking_path, qasm_name), "r", encoding='utf-8') as file:
with open(
os.path.join(benchmarking_path, qasm_name), "r", encoding="utf-8"
) as file:
qasm = file.read()
return qasm

Expand Down Expand Up @@ -178,7 +180,7 @@ def run_benchmarks(self):
logger.info(
"Running benchmark %s of %s...",
logger_counter,
self.num_runs * len(self.full_benchmark_list)
self.num_runs * len(self.full_benchmark_list),
)
self.run_benchmark(benchmark)
logger_counter += 1
Expand All @@ -203,16 +205,16 @@ def save_results(self):
f"results_run{run_number - 1}.json",
)

with open(results_path, "r", encoding='utf-8') as json_file:
with open(results_path, "r", encoding="utf-8") as json_file:
data = json.load(json_file)
data.append(self.metric_data)
with open(results_path, "w", encoding='utf-8') as json_file:
with open(results_path, "w", encoding="utf-8") as json_file:
json.dump(data, json_file)
else:
results_path = os.path.join(
os.path.dirname(__file__), "results", f"results_run{run_number}.json"
)
with open(results_path, "w", encoding='utf-8') as json_file:
with open(results_path, "w", encoding="utf-8") as json_file:
json.dump([self.metric_data], json_file)

def transpile_in_process(self, benchmark, optimization_level):
Expand Down Expand Up @@ -252,7 +254,7 @@ def run_benchmark(self, benchmark):
#############################
# MEMORY FOOTPRINT
#############################

# Add memory_footprint to dictionary corresponding to this benchmark
logger.info("Calculating memory footprint...")
# Multiprocess transpilation to get accurate memory usage
Expand Down Expand Up @@ -294,7 +296,7 @@ def run_benchmark(self, benchmark):
+ +self.metric_data[benchmark_name]["parsing/build_time (seconds)"][-1]
+ self.metric_data[benchmark_name]["transpile_time (seconds)"][-1]
)

#############################
# DEPTH
#############################
Expand Down

0 comments on commit 534203b

Please sign in to comment.